/*
 * Copyright (C) 2017 Igalia S.L
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB.  If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#include "config.h"

#if USE(LIBWEBRTC) && USE(GSTREAMER)

#include "RealtimeOutgoingAudioSourceLibWebRTC.h"

#include "LibWebRTCAudioFormat.h"
#include "LibWebRTCProvider.h"
#include "NotImplemented.h"
#include "gstreamer/GStreamerAudioData.h"

namespace WebCore {

// Pushes audio captured from a GStreamer-backed MediaStreamTrack into
// libwebrtc: incoming samples are accumulated in a GstAdapter, converted
// to the fixed libwebrtc wire format with a GstAudioConverter, and handed
// to the registered sinks in fixed-size chunks on the WebRTC signaling
// thread.
// NOTE(review): template arguments in this file were reconstructed from
// the surrounding calls (the extracted text had every `<...>` stripped);
// confirm parameter types against RealtimeOutgoingAudioSourceLibWebRTC.h.
RealtimeOutgoingAudioSourceLibWebRTC::RealtimeOutgoingAudioSourceLibWebRTC(Ref<MediaStreamTrackPrivate>&& audioSource)
    : RealtimeOutgoingAudioSource(WTFMove(audioSource))
{
    // The original chained these with the comma operator
    // ("m_adapter = ..., m_sampleConverter = nullptr;"); two plain
    // statements are equivalent and intent-revealing.
    m_adapter = adoptGRef(gst_adapter_new());
    m_sampleConverter = nullptr;
}

RealtimeOutgoingAudioSourceLibWebRTC::~RealtimeOutgoingAudioSourceLibWebRTC()
{
    // g_clear_pointer() is already a no-op on nullptr; the guard only
    // avoids the call.
    if (m_sampleConverter)
        g_clear_pointer(&m_sampleConverter, gst_audio_converter_free);
}

// Platform factory: on the GStreamer port, the generic outgoing-audio
// source is the GStreamer-backed implementation.
Ref<RealtimeOutgoingAudioSource> RealtimeOutgoingAudioSource::create(Ref<MediaStreamTrackPrivate>&& audioSource)
{
    return RealtimeOutgoingAudioSourceLibWebRTC::create(WTFMove(audioSource));
}

// Builds the stream description libwebrtc expects: integer samples with
// LibWebRTCAudioFormat's signedness/endianness/width, at the given rate,
// clamped to at most two channels.
static inline std::unique_ptr<GStreamerAudioStreamDescription> libwebrtcAudioFormat(int sampleRate, size_t channelCount)
{
    GstAudioFormat format = gst_audio_format_build_integer(
        LibWebRTCAudioFormat::isSigned,
        LibWebRTCAudioFormat::isBigEndian ? G_BIG_ENDIAN : G_LITTLE_ENDIAN,
        LibWebRTCAudioFormat::sampleSize,
        LibWebRTCAudioFormat::sampleSize);

    GstAudioInfo info;
    // libwebrtc handles mono/stereo only: anything wider is converted
    // down to stereo.
    size_t libWebRTCChannelCount = channelCount >= 2 ? 2 : channelCount;
    gst_audio_info_set_format(&info, format, sampleRate, libWebRTCChannelCount, nullptr);

    return std::make_unique<GStreamerAudioStreamDescription>(info);
}

// Called on the capture thread for every new chunk of audio. (Re)creates
// the sample converter when the input format is first seen, queues the
// buffer into the adapter, and schedules pullAudioData() on the WebRTC
// signaling thread.
void RealtimeOutgoingAudioSourceLibWebRTC::audioSamplesAvailable(const MediaTime&, const PlatformAudioData& audioData, const AudioStreamDescription& streamDescription, size_t /* sampleCount */)
{
    // Take references, not copies (the original's bare `auto` would have
    // sliced/copied the derived objects).
    const auto& data = static_cast<const GStreamerAudioData&>(audioData);
    const auto& desc = static_cast<const GStreamerAudioStreamDescription&>(streamDescription);

    if (m_sampleConverter && !gst_audio_info_is_equal(m_inputStreamDescription->getInfo(), desc.getInfo())) {
        GST_ERROR_OBJECT(this, "FIXME - Audio format renegotiation is not possible yet!");
        g_clear_pointer(&m_sampleConverter, gst_audio_converter_free);
    }

    if (!m_sampleConverter) {
        m_inputStreamDescription = std::make_unique<GStreamerAudioStreamDescription>(desc.getInfo());
        // Output rate is libwebrtc's fixed rate; channel count follows the input.
        m_outputStreamDescription = libwebrtcAudioFormat(LibWebRTCAudioFormat::sampleRate, streamDescription.numberOfChannels());
        m_sampleConverter = gst_audio_converter_new(GST_AUDIO_CONVERTER_FLAG_IN_WRITABLE,
            m_inputStreamDescription->getInfo(), m_outputStreamDescription->getInfo(), nullptr);
    }

    {
        // Hold the adapter lock only around the push — not across the
        // cross-thread dispatch below (the original kept it for the rest
        // of the function).
        LockHolder locker(m_adapterMutex);
        auto buffer = gst_sample_get_buffer(data.getSample());
        // The adapter takes ownership, so push an extra ref: the sample
        // still owns the buffer.
        gst_adapter_push(m_adapter.get(), gst_buffer_ref(buffer));
    }

    LibWebRTCProvider::callOnWebRTCSignalingThread([protectedThis = makeRef(*this)] {
        protectedThis->pullAudioData();
    });
}

// Runs on the WebRTC signaling thread. Drains one fixed-size chunk from
// the adapter (if enough data has accumulated), converts it to the
// libwebrtc format and fans it out to every registered sink.
void RealtimeOutgoingAudioSourceLibWebRTC::pullAudioData()
{
    if (!m_inputStreamDescription || !m_outputStreamDescription) {
        GST_INFO("No stream description set yet.");
        return;
    }

    size_t outChunkSampleCount = LibWebRTCAudioFormat::chunkSampleCount;
    size_t outBufferSize = outChunkSampleCount * m_outputStreamDescription->getInfo()->bpf;

    LockHolder locker(m_adapterMutex);
    // How many input frames the converter needs to produce one output chunk
    // (differs from outChunkSampleCount when resampling).
    size_t inChunkSampleCount = gst_audio_converter_get_in_frames(m_sampleConverter, outChunkSampleCount);
    size_t inBufferSize = inChunkSampleCount * m_inputStreamDescription->getInfo()->bpf;

    auto available = gst_adapter_available(m_adapter.get());
    if (inBufferSize > available) {
        // G_GSIZE_FORMAT: portable format for gsize/size_t (the original's
        // "%ld" is wrong on LLP64 targets).
        GST_DEBUG("Not enough data: wanted: %" G_GSIZE_FORMAT " > %" G_GSIZE_FORMAT " available", inBufferSize, available);
        return;
    }

    // take_buffer() is transfer-full; adoptGRef takes over that reference.
    auto inbuf = adoptGRef(gst_adapter_take_buffer(m_adapter.get(), inBufferSize));

    GstMapInfo inmap;
    gst_buffer_map(inbuf.get(), &inmap, static_cast<GstMapFlags>(GST_MAP_READ));

    auto outbuf = adoptGRef(gst_buffer_new_allocate(nullptr, outBufferSize, nullptr));
    GstMapInfo outmap;
    gst_buffer_map(outbuf.get(), &outmap, static_cast<GstMapFlags>(GST_MAP_WRITE));

    gpointer in[1] = { inmap.data };
    gpointer out[1] = { outmap.data };
    if (gst_audio_converter_samples(m_sampleConverter, static_cast<GstAudioConverterFlags>(0), in, inChunkSampleCount, out, outChunkSampleCount)) {
        for (auto sink : m_sinks) {
            // AudioTrackSinkInterface::OnData(data, bitsPerSample,
            // sampleRate, numberOfChannels, numberOfFrames).
            sink->OnData(outmap.data,
                LibWebRTCAudioFormat::sampleSize,
                static_cast<int>(m_outputStreamDescription->sampleRate()),
                static_cast<size_t>(m_outputStreamDescription->numberOfChannels()),
                outChunkSampleCount);
        }
    } else
        GST_ERROR("Could not convert samples.");

    gst_buffer_unmap(inbuf.get(), &inmap);
    gst_buffer_unmap(outbuf.get(), &outmap);
}

// The three buffering predicates below are not implemented for the
// GStreamer port yet; they conservatively report "no" so callers never
// throttle on them.
bool RealtimeOutgoingAudioSourceLibWebRTC::isReachingBufferedAudioDataHighLimit()
{
    notImplemented();
    return false;
}

bool RealtimeOutgoingAudioSourceLibWebRTC::isReachingBufferedAudioDataLowLimit()
{
    notImplemented();
    return false;
}

bool RealtimeOutgoingAudioSourceLibWebRTC::hasBufferedEnoughData()
{
    notImplemented();
    return false;
}

} // namespace WebCore

#endif // USE(LIBWEBRTC) && USE(GSTREAMER)