WebRTC: silence data not sent for disabled audio track
author: commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 14 Jul 2017 21:43:35 +0000 (21:43 +0000)
committer: commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 14 Jul 2017 21:43:35 +0000 (21:43 +0000)
https://bugs.webkit.org/show_bug.cgi?id=174456
<rdar://problem/33284623>

Patch by Youenn Fablet <youenn@apple.com> on 2017-07-14
Reviewed by Eric Carlson.

Source/WebCore:

Tests: webrtc/audio-muted-stats.html
       webrtc/audio-muted-stats2.html

Add a timer-based approach that sends 10ms of silence every second.
This is consistent with how muted video tracks are implemented.
If the audio track is already muted at the time it is added, no silence data is sent.

* platform/mediastream/mac/RealtimeOutgoingAudioSource.cpp:
(WebCore::RealtimeOutgoingAudioSource::RealtimeOutgoingAudioSource):
(WebCore::RealtimeOutgoingAudioSource::initializeConverter):
(WebCore::RealtimeOutgoingAudioSource::stop):
(WebCore::RealtimeOutgoingAudioSource::sourceMutedChanged):
(WebCore::RealtimeOutgoingAudioSource::sourceEnabledChanged):
(WebCore::RealtimeOutgoingAudioSource::handleMutedIfNeeded):
(WebCore::RealtimeOutgoingAudioSource::sendSilence):
* platform/mediastream/mac/RealtimeOutgoingAudioSource.h:

LayoutTests:

* webrtc/audio-muted-stats-expected.txt: Added.
* webrtc/audio-muted-stats.html: Added.
* webrtc/audio-muted-stats2-expected.txt: Added.
* webrtc/audio-muted-stats2.html: Added.

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@219524 268f45cc-cd09-0410-ab3c-d52691b4dbfc

LayoutTests/ChangeLog
LayoutTests/webrtc/audio-muted-stats-expected.txt [new file with mode: 0644]
LayoutTests/webrtc/audio-muted-stats.html [new file with mode: 0644]
LayoutTests/webrtc/audio-muted-stats2-expected.txt [new file with mode: 0644]
LayoutTests/webrtc/audio-muted-stats2.html [new file with mode: 0644]
Source/WebCore/ChangeLog
Source/WebCore/platform/mediastream/mac/RealtimeOutgoingAudioSource.cpp
Source/WebCore/platform/mediastream/mac/RealtimeOutgoingAudioSource.h

index a88586a..4d82826 100644 (file)
@@ -1,3 +1,16 @@
+2017-07-14  Youenn Fablet  <youenn@apple.com>
+
+        WebRTC: silence data not sent for disabled audio track
+        https://bugs.webkit.org/show_bug.cgi?id=174456
+        <rdar://problem/33284623>
+
+        Reviewed by Eric Carlson.
+
+        * webrtc/audio-muted-stats-expected.txt: Added.
+        * webrtc/audio-muted-stats.html: Added.
+        * webrtc/audio-muted-stats2-expected.txt: Added.
+        * webrtc/audio-muted-stats2.html: Added.
+
 2017-07-14  Jonathan Bedard  <jbedard@apple.com>
 
         Add High Sierra test expectations
diff --git a/LayoutTests/webrtc/audio-muted-stats-expected.txt b/LayoutTests/webrtc/audio-muted-stats-expected.txt
new file mode 100644 (file)
index 0000000..96a787d
--- /dev/null
@@ -0,0 +1,3 @@
+
+PASS Audio silent data being sent in case track is muted 
+
diff --git a/LayoutTests/webrtc/audio-muted-stats.html b/LayoutTests/webrtc/audio-muted-stats.html
new file mode 100644 (file)
index 0000000..992b706
--- /dev/null
@@ -0,0 +1,72 @@
+<!doctype html>
+<html>
+    <head>
+        <meta charset="utf-8">
+        <title>Testing that silence audio data is sent for a disabled audio track</title>
+        <script src="../resources/testharness.js"></script>
+        <script src="../resources/testharnessreport.js"></script>
+    </head>
+    <body>
+        <script src ="routines.js"></script>
+        <script>
+function getOutboundRTPStats(connection)
+{
+    return connection.getStats().then((report) => {
+        var stats;
+        report.forEach((statItem) => {
+            if (statItem.type === "outbound-rtp") {
+                stats = statItem;
+            }
+        });
+        return stats;
+    });
+}
+
+function checkOutboundBytesSentIncreased(firstConnection, statsFirstConnection, count)
+{
+    return getOutboundRTPStats(firstConnection).then((stats) => {
+       if (stats.bytesSent > statsFirstConnection.bytesSent)
+            return;
+        if (++count === 50)
+            return Promise.reject("checking outbound stats bytes sent increasing timed out");
+        return waitFor(50).then(() => {
+            return checkOutboundBytesSentIncreased(firstConnection, statsFirstConnection, count)
+        });
+    });
+}
+
+var track, firstConnection;
+promise_test((test) => {
+    if (window.testRunner)
+        testRunner.setUserMediaPermission(true);
+
+    return navigator.mediaDevices.getUserMedia({ audio: true}).then((stream) => {
+       track = stream.getAudioTracks()[0];
+        return new Promise((resolve, reject) => {
+            createConnections((connection) => {
+                firstConnection = connection;
+                firstConnection.addTrack(track, stream);
+            }, (connection) => {
+                connection.ontrack = resolve;
+            });
+            setTimeout(() => reject("Test timed out"), 5000);
+        });
+    }).then(() => {
+        return getOutboundRTPStats(firstConnection);
+    }).then((stats) => {
+        statsFirstConnection = stats;
+        return checkOutboundBytesSentIncreased(firstConnection, statsFirstConnection, 0);
+    }).then(() => {
+       track.enabled = false;
+        // Let's wait a little bit so that audio is disabled.
+       return waitFor(100);
+    }).then((stats) => {
+        return getOutboundRTPStats(firstConnection);
+    }).then((stats) => {
+        statsFirstConnection = stats;
+        return checkOutboundBytesSentIncreased(firstConnection, statsFirstConnection, 0);
+    });
+}, "Audio silent data being sent in case track is muted");
+        </script>
+    </body>
+</html>
diff --git a/LayoutTests/webrtc/audio-muted-stats2-expected.txt b/LayoutTests/webrtc/audio-muted-stats2-expected.txt
new file mode 100644 (file)
index 0000000..621b5ff
--- /dev/null
@@ -0,0 +1,3 @@
+
+PASS Audio silent data not being sent in case track is muted from the start 
+
diff --git a/LayoutTests/webrtc/audio-muted-stats2.html b/LayoutTests/webrtc/audio-muted-stats2.html
new file mode 100644 (file)
index 0000000..789a1b2
--- /dev/null
@@ -0,0 +1,64 @@
+<!doctype html>
+<html>
+    <head>
+        <meta charset="utf-8">
+        <title>Testing that no silence audio data is sent for an initially disabled audio track</title>
+        <script src="../resources/testharness.js"></script>
+        <script src="../resources/testharnessreport.js"></script>
+    </head>
+    <body>
+        <script src ="routines.js"></script>
+        <script>
+function getOutboundRTPStats(connection)
+{
+    return connection.getStats().then((report) => {
+        var stats;
+        report.forEach((statItem) => {
+            if (statItem.type === "outbound-rtp") {
+                stats = statItem;
+            }
+        });
+        return stats;
+    });
+}
+
+function checkOutboundBytesSentNotIncreasing(firstConnection, statsFirstConnection, count)
+{
+    return getOutboundRTPStats(firstConnection).then((stats) => {
+       if (stats.bytesSent > statsFirstConnection.bytesSent)
+            return Promise.reject("outbound stats bytes sent increasing");
+        if (++count === 10)
+            return;
+        return waitFor(50).then(() => {
+            return checkOutboundBytesSentNotIncreasing(firstConnection, statsFirstConnection, count);
+        });
+    });
+}
+
+var track, firstConnection;
+promise_test((test) => {
+    if (window.testRunner)
+        testRunner.setUserMediaPermission(true);
+
+    return navigator.mediaDevices.getUserMedia({ audio: true}).then((stream) => {
+       track = stream.getAudioTracks()[0];
+       track.enabled = false;
+        return new Promise((resolve, reject) => {
+            createConnections((connection) => {
+                firstConnection = connection;
+                firstConnection.addTrack(track, stream);
+            }, (connection) => {
+                connection.ontrack = resolve;
+            });
+            setTimeout(() => reject("Test timed out"), 5000);
+        });
+    }).then(() => {
+        return getOutboundRTPStats(firstConnection);
+    }).then((stats) => {
+        statsFirstConnection = stats;
+        return checkOutboundBytesSentNotIncreasing(firstConnection, statsFirstConnection, 0);
+    });
+}, "Audio silent data not being sent in case track is muted from the start");
+        </script>
+    </body>
+</html>
index 2285ff1..e1dac7c 100644 (file)
@@ -1,3 +1,28 @@
+2017-07-14  Youenn Fablet  <youenn@apple.com>
+
+        WebRTC: silence data not sent for disabled audio track
+        https://bugs.webkit.org/show_bug.cgi?id=174456
+        <rdar://problem/33284623>
+
+        Reviewed by Eric Carlson.
+
+        Tests: webrtc/audio-muted-stats.html
+               webrtc/audio-muted-stats2.html
+
+        Adding a timer-based approach to send 10ms of silence every second.
+        This is consistent with how muted video tracks are implemented.
+        In case the audio track is muted at the time it is added, no silence data is sent.
+
+        * platform/mediastream/mac/RealtimeOutgoingAudioSource.cpp:
+        (WebCore::RealtimeOutgoingAudioSource::RealtimeOutgoingAudioSource):
+        (WebCore::RealtimeOutgoingAudioSource::initializeConverter):
+        (WebCore::RealtimeOutgoingAudioSource::stop):
+        (WebCore::RealtimeOutgoingAudioSource::sourceMutedChanged):
+        (WebCore::RealtimeOutgoingAudioSource::sourceEnabledChanged):
+        (WebCore::RealtimeOutgoingAudioSource::handleMutedIfNeeded):
+        (WebCore::RealtimeOutgoingAudioSource::sendSilence):
+        * platform/mediastream/mac/RealtimeOutgoingAudioSource.h:
+
 2017-07-14  Michael Catanzaro  <mcatanzaro@igalia.com>
 
         [CMake] Unclear distinction between WebKitHelpers and WebKitMacros
index e72e15c..1bdeb48 100644 (file)
@@ -49,6 +49,7 @@ static inline AudioStreamBasicDescription libwebrtcAudioFormat(Float64 sampleRat
 RealtimeOutgoingAudioSource::RealtimeOutgoingAudioSource(Ref<MediaStreamTrackPrivate>&& audioSource)
     : m_audioSource(WTFMove(audioSource))
     , m_sampleConverter(AudioSampleDataSource::create(LibWebRTCAudioFormat::sampleRate * 2))
+    , m_silenceAudioTimer(*this, &RealtimeOutgoingAudioSource::sendSilence)
 {
     m_audioSource->addObserver(*this);
     initializeConverter();
@@ -68,24 +69,51 @@ void RealtimeOutgoingAudioSource::initializeConverter()
 {
     m_muted = m_audioSource->muted();
     m_enabled = m_audioSource->enabled();
-    m_sampleConverter->setMuted(m_muted || !m_enabled);
+    handleMutedIfNeeded();
 }
 
 void RealtimeOutgoingAudioSource::stop()
 {
+    m_silenceAudioTimer.stop();
     m_audioSource->removeObserver(*this);
 }
 
 void RealtimeOutgoingAudioSource::sourceMutedChanged()
 {
     m_muted = m_audioSource->muted();
-    m_sampleConverter->setMuted(m_muted || !m_enabled);
+    handleMutedIfNeeded();
 }
 
 void RealtimeOutgoingAudioSource::sourceEnabledChanged()
 {
     m_enabled = m_audioSource->enabled();
-    m_sampleConverter->setMuted(m_muted || !m_enabled);
+    handleMutedIfNeeded();
+}
+
+void RealtimeOutgoingAudioSource::handleMutedIfNeeded()
+{
+    bool isSilenced = m_muted || !m_enabled;
+    m_sampleConverter->setMuted(isSilenced);
+    if (isSilenced && !m_silenceAudioTimer.isActive())
+        m_silenceAudioTimer.startRepeating(1_s);
+    if (!isSilenced && m_silenceAudioTimer.isActive())
+        m_silenceAudioTimer.stop();
+}
+
+void RealtimeOutgoingAudioSource::sendSilence()
+{
+    LibWebRTCProvider::callOnWebRTCSignalingThread([this, protectedThis = makeRef(*this)] {
+        size_t chunkSampleCount = m_outputStreamDescription.sampleRate() / 100;
+        size_t bufferSize = chunkSampleCount * LibWebRTCAudioFormat::sampleByteSize * m_outputStreamDescription.numberOfChannels();
+
+        if (!bufferSize)
+            return;
+
+        m_audioBuffer.grow(bufferSize);
+        memset(m_audioBuffer.data(), 0, bufferSize);
+        for (auto sink : m_sinks)
+            sink->OnData(m_audioBuffer.data(), LibWebRTCAudioFormat::sampleSize, m_outputStreamDescription.sampleRate(), m_outputStreamDescription.numberOfChannels(), chunkSampleCount);
+    });
 }
 
 bool RealtimeOutgoingAudioSource::isReachingBufferedAudioDataHighLimit()
index b341ffc..3ecc800 100644 (file)
@@ -33,6 +33,7 @@
 #include "AudioSampleDataSource.h"
 #include "LibWebRTCMacros.h"
 #include "MediaStreamTrackPrivate.h"
+#include "Timer.h"
 #include <webrtc/api/mediastreaminterface.h>
 #include <wtf/ThreadSafeRefCounted.h>
 
@@ -83,6 +84,8 @@ private:
     void pullAudioData();
 
     void initializeConverter();
+    void handleMutedIfNeeded();
+    void sendSilence();
 
     Vector<webrtc::AudioTrackSinkInterface*> m_sinks;
     Ref<MediaStreamTrackPrivate> m_audioSource;
@@ -96,6 +99,7 @@ private:
     bool m_muted { false };
     bool m_enabled { true };
     bool m_skippingAudioData { false };
+    Timer m_silenceAudioTimer;
 };
 
 } // namespace WebCore