/*
 * Copyright (C) 2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "AudioSampleBufferCompressor.h"

#if ENABLE(MEDIA_STREAM) && USE(AVFOUNDATION)

#include "Logging.h"
#include <AudioToolbox/AudioCodec.h>
#include <AudioToolbox/AudioConverter.h>
#include <AudioToolbox/AudioFormat.h>
#include <Foundation/Foundation.h>

#import <pal/cf/AudioToolboxSoftLink.h>

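// Conversion runs only while more than this much audio (in seconds) is queued as input; the same
// value is used to size the destination buffer for one conversion pass.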
#define LOW_WATER_TIME_IN_SECONDS 0.1

namespace WebCore {

using namespace PAL;

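// AudioSampleBufferCompressor converts audio sample buffers pushed through addSampleBuffer() into
// the compressed format identified by m_outputCodecType. Input buffers accumulate in
// m_inputBufferQueue, conversion happens on m_serialDispatchQueue, and compressed buffers are
// enqueued in m_outputBufferQueue, whose "data becomes ready" trigger invokes the callback passed
// to create().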
std::unique_ptr<AudioSampleBufferCompressor> AudioSampleBufferCompressor::create(CMBufferQueueTriggerCallback callback, void* callbackObject)
{
    auto compressor = std::unique_ptr<AudioSampleBufferCompressor>(new AudioSampleBufferCompressor());
    if (!compressor->initialize(callback, callbackObject))
        return nullptr;
    return compressor;
}

AudioSampleBufferCompressor::AudioSampleBufferCompressor()
    : m_serialDispatchQueue { dispatch_queue_create("com.apple.AudioSampleBufferCompressor", DISPATCH_QUEUE_SERIAL) }
    , m_lowWaterTime { CMTimeMakeWithSeconds(LOW_WATER_TIME_IN_SECONDS, 1000) }
{
}

AudioSampleBufferCompressor::~AudioSampleBufferCompressor()
{
    dispatch_release(m_serialDispatchQueue);
}

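// Creates the unsorted input and output CMBufferQueues and installs the caller's trigger on the
// output queue so it fires as soon as compressed data becomes available.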
bool AudioSampleBufferCompressor::initialize(CMBufferQueueTriggerCallback callback, void* callbackObject)
{
    CMBufferQueueRef inputBufferQueue;
    if (auto error = CMBufferQueueCreate(kCFAllocatorDefault, 0, CMBufferQueueGetCallbacksForUnsortedSampleBuffers(), &inputBufferQueue)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMBufferQueueCreate for m_inputBufferQueue failed with %d", error);
        return false;
    }
    m_inputBufferQueue = adoptCF(inputBufferQueue);

    CMBufferQueueRef outputBufferQueue;
    if (auto error = CMBufferQueueCreate(kCFAllocatorDefault, 0, CMBufferQueueGetCallbacksForUnsortedSampleBuffers(), &outputBufferQueue)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMBufferQueueCreate for m_outputBufferQueue failed with %d", error);
        return false;
    }
    m_outputBufferQueue = adoptCF(outputBufferQueue);
    CMBufferQueueInstallTrigger(m_outputBufferQueue.get(), callback, callbackObject, kCMBufferQueueTrigger_WhenDataBecomesReady, kCMTimeZero, NULL);

    m_isEncoding = true;
    return true;
}

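// Drains whatever input remains (an invalid low-water time means "convert everything") and marks
// the output queue's end of data so consumers know no further buffers will arrive.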
void AudioSampleBufferCompressor::finish()
{
    dispatch_sync(m_serialDispatchQueue, ^{
        processSampleBuffersUntilLowWaterTime(kCMTimeInvalid);
        auto error = CMBufferQueueMarkEndOfData(m_outputBufferQueue.get());
        RELEASE_LOG_ERROR_IF(error, MediaStream, "AudioSampleBufferCompressor CMBufferQueueMarkEndOfData failed %d", error);
        m_isEncoding = false;
    });
}

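// Lazily configures the AudioConverter from the first input buffer's format description: derives
// the destination AudioStreamBasicDescription, applies any magic cookie, picks an AAC bit rate
// from the sample rate, and sizes the destination buffer (plus packet descriptions for VBR output).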
bool AudioSampleBufferCompressor::initAudioConverterForSourceFormatDescription(CMFormatDescriptionRef formatDescription, AudioFormatID outputFormatID)
{
    const auto *audioFormatListItem = CMAudioFormatDescriptionGetRichestDecodableFormat(formatDescription);
    m_sourceFormat = audioFormatListItem->mASBD;

    memset(&m_destinationFormat, 0, sizeof(AudioStreamBasicDescription));
    m_destinationFormat.mFormatID = outputFormatID;
    m_destinationFormat.mSampleRate = m_sourceFormat.mSampleRate;
    m_destinationFormat.mChannelsPerFrame = m_sourceFormat.mChannelsPerFrame;

    UInt32 size = sizeof(m_destinationFormat);
    if (auto error = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &size, &m_destinationFormat)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor AudioFormatGetProperty failed with %d", error);
        return false;
    }

    AudioConverterRef converter;
    if (auto error = AudioConverterNew(&m_sourceFormat, &m_destinationFormat, &converter)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor AudioConverterNew failed with %d", error);
        return false;
    }
    m_converter = adoptCF(converter);

    size_t cookieSize = 0;
    const void *cookie = CMAudioFormatDescriptionGetMagicCookie(formatDescription, &cookieSize);
    if (cookieSize) {
        if (auto error = AudioConverterSetProperty(m_converter.get(), kAudioConverterDecompressionMagicCookie, (UInt32)cookieSize, cookie)) {
            RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor setting kAudioConverterDecompressionMagicCookie failed with %d", error);
            return false;
        }
    }

    size = sizeof(m_sourceFormat);
    if (auto error = AudioConverterGetProperty(m_converter.get(), kAudioConverterCurrentInputStreamDescription, &size, &m_sourceFormat)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor getting kAudioConverterCurrentInputStreamDescription failed with %d", error);
        return false;
    }

    if (!m_sourceFormat.mBytesPerPacket) {
        RELEASE_LOG_ERROR(MediaStream, "mBytesPerPacket should not be zero");
        return false;
    }

    size = sizeof(m_destinationFormat);
    if (auto error = AudioConverterGetProperty(m_converter.get(), kAudioConverterCurrentOutputStreamDescription, &size, &m_destinationFormat)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor getting kAudioConverterCurrentOutputStreamDescription failed with %d", error);
        return false;
    }

    if (m_destinationFormat.mFormatID == kAudioFormatMPEG4AAC) {
        // FIXME: Set outputBitRate according to MediaRecorderOptions.audioBitsPerSecond.
        UInt32 outputBitRate = 64000;
        if (m_destinationFormat.mSampleRate >= 44100)
            outputBitRate = 192000;
        else if (m_destinationFormat.mSampleRate < 22000)
            outputBitRate = 32000;

        size = sizeof(outputBitRate);
        if (auto error = AudioConverterSetProperty(m_converter.get(), kAudioConverterEncodeBitRate, size, &outputBitRate)) {
            RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor setting kAudioConverterEncodeBitRate failed with %d", error);
            return false;
        }
    }

    if (!m_destinationFormat.mBytesPerPacket) {
        // If the destination format is VBR, we need to get max size per packet from the converter.
        size = sizeof(m_maxOutputPacketSize);

        if (auto error = AudioConverterGetProperty(m_converter.get(), kAudioConverterPropertyMaximumOutputPacketSize, &size, &m_maxOutputPacketSize)) {
            RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor getting kAudioConverterPropertyMaximumOutputPacketSize failed with %d", error);
            return false;
        }
    }

    auto destinationBufferSize = computeBufferSizeForAudioFormat(m_destinationFormat, m_maxOutputPacketSize, LOW_WATER_TIME_IN_SECONDS);
    if (m_destinationBuffer.size() < destinationBufferSize)
        m_destinationBuffer.resize(destinationBufferSize);
    if (!m_destinationFormat.mBytesPerPacket)
        m_destinationPacketDescriptions.resize(m_destinationBuffer.capacity() / m_maxOutputPacketSize);

    return true;
}

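// Returns the byte size needed to hold `duration` seconds of output: the number of packets in that
// duration times the (maximum) packet size.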
size_t AudioSampleBufferCompressor::computeBufferSizeForAudioFormat(AudioStreamBasicDescription format, UInt32 maxOutputPacketSize, Float32 duration)
{
    UInt32 numPackets = (format.mSampleRate * duration) / format.mFramesPerPacket;
    UInt32 outputPacketSize = format.mBytesPerPacket ? format.mBytesPerPacket : maxOutputPacketSize;
    UInt32 bufferSize = numPackets * outputPacketSize;

    return bufferSize;
}

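// Encoders such as AAC emit priming frames at the start of the stream. Attach a trim-at-start
// attachment so downstream consumers discard them, spreading the remaining prime duration across
// as many output buffers as needed.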
void AudioSampleBufferCompressor::attachPrimingTrimsIfNeeded(CMSampleBufferRef buffer)
{
    if (CMTIME_IS_INVALID(m_remainingPrimeDuration)) {
        AudioConverterPrimeInfo primeInfo { 0, 0 };
        UInt32 size = sizeof(primeInfo);

        if (auto error = AudioConverterGetProperty(m_converter.get(), kAudioConverterPrimeInfo, &size, &primeInfo)) {
            RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor getting kAudioConverterPrimeInfo failed with %d", error);
            return;
        }

        m_remainingPrimeDuration = CMTimeMake(primeInfo.leadingFrames, m_destinationFormat.mSampleRate);
    }

    if (CMTIME_COMPARE_INLINE(kCMTimeZero, <, m_remainingPrimeDuration)) {
        CMTime sampleDuration = CMSampleBufferGetDuration(buffer);
        CMTime trimDuration = CMTimeMinimum(sampleDuration, m_remainingPrimeDuration);
        CFDictionaryRef trimAtStartDict = CMTimeCopyAsDictionary(trimDuration, kCFAllocatorDefault);
        CMSetAttachment(buffer, kCMSampleBufferAttachmentKey_TrimDurationAtStart, trimAtStartDict, kCMAttachmentMode_ShouldPropagate);
        CFRelease(trimAtStartDict);
        m_remainingPrimeDuration = CMTimeSubtract(m_remainingPrimeDuration, trimDuration);
    }
}

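// Queries the codec's prime information under its optimal delay mode to compute the gradual
// decoder refresh count (in packets), restoring the original delay mode afterwards.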
RetainPtr<NSNumber> AudioSampleBufferCompressor::gradualDecoderRefreshCount()
{
    UInt32 delaySize = sizeof(uint32_t);
    uint32_t originalDelayMode = 0;
    if (auto error = AudioConverterGetProperty(m_converter.get(), kAudioCodecPropertyDelayMode, &delaySize, &originalDelayMode)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor getting kAudioCodecPropertyDelayMode failed with %d", error);
        return nil;
    }

    uint32_t optimalDelayMode = kAudioCodecDelayMode_Optimal;
    if (auto error = AudioConverterSetProperty(m_converter.get(), kAudioCodecPropertyDelayMode, delaySize, &optimalDelayMode)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor setting kAudioCodecPropertyDelayMode failed with %d", error);
        return nil;
    }

    UInt32 primeSize = sizeof(AudioCodecPrimeInfo);
    AudioCodecPrimeInfo primeInfo { 0, 0 };
    if (auto error = AudioConverterGetProperty(m_converter.get(), kAudioCodecPropertyPrimeInfo, &primeSize, &primeInfo)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor getting kAudioCodecPropertyPrimeInfo failed with %d", error);
        return nil;
    }

    if (auto error = AudioConverterSetProperty(m_converter.get(), kAudioCodecPropertyDelayMode, delaySize, &originalDelayMode)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor setting kAudioCodecPropertyDelayMode failed with %d", error);
        return nil;
    }
    // adoptNS() expects a +1 reference, so use alloc/init rather than an autoreleased convenience constructor.
    return adoptNS([[NSNumber alloc] initWithInt:(primeInfo.leadingFrames / m_destinationFormat.mFramesPerPacket)]);
}

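// Wraps one converter output pass into a CMSampleBuffer: lazily creates the destination format
// description (including any compression magic cookie), copies the converted bytes into a
// CMBlockBuffer, and attaches the gradual decoder refresh count when it is non-zero. Returns a
// Create-rule (+1) reference that the caller is expected to adopt.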
CMSampleBufferRef AudioSampleBufferCompressor::sampleBufferWithNumPackets(UInt32 numPackets, AudioBufferList fillBufferList)
{
    Vector<char> cookie;
    if (!m_destinationFormatDescription) {
        UInt32 cookieSize = 0;

        auto error = AudioConverterGetPropertyInfo(m_converter.get(), kAudioConverterCompressionMagicCookie, &cookieSize, NULL);
        if ((error == noErr) && !!cookieSize) {
            cookie.resize(cookieSize);

            if (auto error = AudioConverterGetProperty(m_converter.get(), kAudioConverterCompressionMagicCookie, &cookieSize, cookie.data())) {
                RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor getting kAudioConverterCompressionMagicCookie failed with %d", error);
                return nil;
            }
        }

        CMFormatDescriptionRef destinationFormatDescription;
        if (auto error = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &m_destinationFormat, 0, NULL, cookieSize, cookie.data(), NULL, &destinationFormatDescription)) {
            RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMAudioFormatDescriptionCreate failed with %d", error);
            return nil;
        }
        m_destinationFormatDescription = adoptCF(destinationFormatDescription);
        m_gdrCountNum = gradualDecoderRefreshCount();
    }

    char *data = static_cast<char*>(fillBufferList.mBuffers[0].mData);
    size_t dataSize = fillBufferList.mBuffers[0].mDataByteSize;

    CMBlockBufferRef blockBuffer;
    if (auto error = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, NULL, dataSize, kCFAllocatorDefault, NULL, 0, dataSize, kCMBlockBufferAssureMemoryNowFlag, &blockBuffer)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMBlockBufferCreateWithMemoryBlock failed with %d", error);
        return nil;
    }
    auto buffer = adoptCF(blockBuffer);

    if (auto error = CMBlockBufferReplaceDataBytes(data, buffer.get(), 0, dataSize)) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMBlockBufferReplaceDataBytes failed with %d", error);
        return nil;
    }

    CMSampleBufferRef sampleBuffer;
    auto error = CMAudioSampleBufferCreateWithPacketDescriptions(kCFAllocatorDefault, buffer.get(), true, NULL, NULL, m_destinationFormatDescription.get(), numPackets, m_currentNativePresentationTimeStamp, m_destinationPacketDescriptions.data(), &sampleBuffer);
    if (error) {
        RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMAudioSampleBufferCreateWithPacketDescriptions failed with %d", error);
        return nil;
    }

    if ([m_gdrCountNum intValue])
        CMSetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_GradualDecoderRefresh, (__bridge CFTypeRef)m_gdrCountNum.get(), kCMAttachmentMode_ShouldPropagate);

    return sampleBuffer;
}

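// Static trampoline used as the AudioConverterFillComplexBuffer input callback; forwards to the
// compressor instance passed as the user data pointer.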
OSStatus AudioSampleBufferCompressor::audioConverterComplexInputDataProc(AudioConverterRef, UInt32 *numOutputPacketsPtr, AudioBufferList *bufferList, AudioStreamPacketDescription **packetDescriptionOut, void *audioSampleBufferCompressor)
{
    auto *compressor = static_cast<AudioSampleBufferCompressor*>(audioSampleBufferCompressor);
    return compressor->provideSourceDataNumOutputPackets(numOutputPacketsPtr, bufferList, packetDescriptionOut);
}

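// Supplies source data to the converter by copying bytes from the queued input sample buffers into
// m_sourceBuffer. Handles a single interleaved buffer as well as the two-buffer, non-interleaved
// stereo layout; in both cases it stops early if the input queue runs dry.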
OSStatus AudioSampleBufferCompressor::provideSourceDataNumOutputPackets(UInt32* numOutputPacketsPtr, AudioBufferList* audioBufferList, AudioStreamPacketDescription** packetDescriptionOut)
{
    if (packetDescriptionOut)
        *packetDescriptionOut = NULL;

    const UInt32 numPacketsToCopy = *numOutputPacketsPtr;
    size_t numBytesToCopy = (numPacketsToCopy * m_sourceFormat.mBytesPerPacket);

    if (audioBufferList->mNumberBuffers == 1) {
        size_t currentOffsetInSourceBuffer = 0;

        if (m_sourceBuffer.size() < numBytesToCopy)
            m_sourceBuffer.resize(numBytesToCopy);

        while (numBytesToCopy) {
            if (m_sampleBlockBufferSize <= m_currentOffsetInSampleBlockBuffer) {
                if (m_sampleBlockBuffer) {
                    m_sampleBlockBuffer = nullptr;
                    m_sampleBlockBufferSize = 0;
                }

                if (CMBufferQueueIsEmpty(m_inputBufferQueue.get()))
                    break;

                auto sampleBuffer = adoptCF((CMSampleBufferRef)(const_cast<void*>(CMBufferQueueDequeueAndRetain(m_inputBufferQueue.get()))));
                m_sampleBlockBuffer = adoptCF((CMBlockBufferRef)(const_cast<void*>(CFRetain(CMSampleBufferGetDataBuffer(sampleBuffer.get())))));
                if (!m_sampleBlockBuffer) {
                    RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMSampleBufferGetDataBuffer failed");
                    m_sampleBlockBufferSize = 0;
                    m_currentOffsetInSampleBlockBuffer = 0;
                    continue;
                }
                m_sampleBlockBufferSize = CMBlockBufferGetDataLength(m_sampleBlockBuffer.get());
                m_currentOffsetInSampleBlockBuffer = 0;
            }

            if (m_sampleBlockBuffer) {
                size_t numBytesToCopyFromSampleBuffer = std::min(numBytesToCopy, (m_sampleBlockBufferSize - m_currentOffsetInSampleBlockBuffer));
                if (auto error = CMBlockBufferCopyDataBytes(m_sampleBlockBuffer.get(), m_currentOffsetInSampleBlockBuffer, numBytesToCopyFromSampleBuffer, (m_sourceBuffer.data() + currentOffsetInSourceBuffer))) {
                    RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMBlockBufferCopyDataBytes failed with %d", error);
                    return error;
                }
                numBytesToCopy -= numBytesToCopyFromSampleBuffer;
                currentOffsetInSourceBuffer += numBytesToCopyFromSampleBuffer;
                m_currentOffsetInSampleBlockBuffer += numBytesToCopyFromSampleBuffer;
            }
        }
        audioBufferList->mBuffers[0].mData = m_sourceBuffer.data();
        audioBufferList->mBuffers[0].mDataByteSize = currentOffsetInSourceBuffer;
        audioBufferList->mBuffers[0].mNumberChannels = m_sourceFormat.mChannelsPerFrame;

        *numOutputPacketsPtr = (audioBufferList->mBuffers[0].mDataByteSize / m_sourceFormat.mBytesPerPacket);
        return noErr;
    }

    ASSERT(audioBufferList->mNumberBuffers == 2);

    // FIXME: Support interleaved data by uninterleaving m_sourceBuffer if needed.
    ASSERT(m_sourceFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved);

    if (m_sourceBuffer.size() < 2 * numBytesToCopy)
        m_sourceBuffer.resize(2 * numBytesToCopy);
    auto* firstChannel = m_sourceBuffer.data();
    auto* secondChannel = m_sourceBuffer.data() + numBytesToCopy;

    size_t currentOffsetInSourceBuffer = 0;
    while (numBytesToCopy) {
        if (m_sampleBlockBufferSize <= m_currentOffsetInSampleBlockBuffer) {
            if (m_sampleBlockBuffer) {
                m_sampleBlockBuffer = nullptr;
                m_sampleBlockBufferSize = 0;
            }

            if (CMBufferQueueIsEmpty(m_inputBufferQueue.get()))
                break;

            auto sampleBuffer = adoptCF((CMSampleBufferRef)(const_cast<void*>(CMBufferQueueDequeueAndRetain(m_inputBufferQueue.get()))));
            m_sampleBlockBuffer = adoptCF((CMBlockBufferRef)(const_cast<void*>(CFRetain(CMSampleBufferGetDataBuffer(sampleBuffer.get())))));
            if (!m_sampleBlockBuffer) {
                RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMSampleBufferGetDataBuffer failed");
                m_sampleBlockBufferSize = 0;
                m_currentOffsetInSampleBlockBuffer = 0;
                continue;
            }
            // The non-interleaved block buffer stores channel 0 followed by channel 1, so track the per-channel byte count.
            m_sampleBlockBufferSize = CMBlockBufferGetDataLength(m_sampleBlockBuffer.get()) / 2;
            m_currentOffsetInSampleBlockBuffer = 0;
        }

        if (m_sampleBlockBuffer) {
            size_t numBytesToCopyFromSampleBuffer = std::min(numBytesToCopy, (m_sampleBlockBufferSize - m_currentOffsetInSampleBlockBuffer));
            if (auto error = CMBlockBufferCopyDataBytes(m_sampleBlockBuffer.get(), m_currentOffsetInSampleBlockBuffer, numBytesToCopyFromSampleBuffer, (firstChannel + currentOffsetInSourceBuffer))) {
                RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMBlockBufferCopyDataBytes first channel failed with %d", error);
                return error;
            }
            if (auto error = CMBlockBufferCopyDataBytes(m_sampleBlockBuffer.get(), m_currentOffsetInSampleBlockBuffer + m_sampleBlockBufferSize, numBytesToCopyFromSampleBuffer, (secondChannel + currentOffsetInSourceBuffer))) {
                RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMBlockBufferCopyDataBytes second channel failed with %d", error);
                return error;
            }
            numBytesToCopy -= numBytesToCopyFromSampleBuffer;
            currentOffsetInSourceBuffer += numBytesToCopyFromSampleBuffer;
            m_currentOffsetInSampleBlockBuffer += numBytesToCopyFromSampleBuffer;
        }
    }

    audioBufferList->mBuffers[0].mData = firstChannel;
    audioBufferList->mBuffers[0].mDataByteSize = currentOffsetInSourceBuffer;
    audioBufferList->mBuffers[0].mNumberChannels = 1;

    audioBufferList->mBuffers[1].mData = secondChannel;
    audioBufferList->mBuffers[1].mDataByteSize = currentOffsetInSourceBuffer;
    audioBufferList->mBuffers[1].mNumberChannels = 1;

    *numOutputPacketsPtr = (audioBufferList->mBuffers[0].mDataByteSize / m_sourceFormat.mBytesPerPacket);
    return noErr;
}

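// Core conversion loop. On first use, the converter is initialized from the buffer at the head of
// the input queue and the presentation timestamps are seeded from it. Conversion then repeats
// while more than lowWaterTime of input remains queued (or unconditionally when lowWaterTime is
// invalid), enqueuing each compressed buffer on the output queue.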
void AudioSampleBufferCompressor::processSampleBuffersUntilLowWaterTime(CMTime lowWaterTime)
{
    if (!m_converter) {
        if (CMBufferQueueIsEmpty(m_inputBufferQueue.get()))
            return;

        auto buffer = (CMSampleBufferRef)(const_cast<void*>(CMBufferQueueGetHead(m_inputBufferQueue.get())));
        ASSERT(buffer);

        m_currentNativePresentationTimeStamp = CMSampleBufferGetPresentationTimeStamp(buffer);
        m_currentOutputPresentationTimeStamp = CMSampleBufferGetOutputPresentationTimeStamp(buffer);

        auto formatDescription = CMSampleBufferGetFormatDescription(buffer);
        if (!initAudioConverterForSourceFormatDescription(formatDescription, m_outputCodecType))
            return;
    }

    while (CMTIME_IS_INVALID(lowWaterTime) || CMTIME_COMPARE_INLINE(lowWaterTime, <, CMBufferQueueGetDuration(m_inputBufferQueue.get()))) {
        AudioBufferList fillBufferList;

        fillBufferList.mNumberBuffers = 1;
        fillBufferList.mBuffers[0].mNumberChannels = m_destinationFormat.mChannelsPerFrame;
        fillBufferList.mBuffers[0].mDataByteSize = (UInt32)m_destinationBuffer.capacity();
        fillBufferList.mBuffers[0].mData = m_destinationBuffer.data();

        UInt32 outputPacketSize = m_destinationFormat.mBytesPerPacket ? m_destinationFormat.mBytesPerPacket : m_maxOutputPacketSize;
        UInt32 numOutputPackets = (UInt32)m_destinationBuffer.capacity() / outputPacketSize;

        auto error = AudioConverterFillComplexBuffer(m_converter.get(), audioConverterComplexInputDataProc, this, &numOutputPackets, &fillBufferList, m_destinationPacketDescriptions.data());
        if (error) {
            RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor AudioConverterFillComplexBuffer failed with %d", error);
            return;
        }

        if (!numOutputPackets)
            break;

        // sampleBufferWithNumPackets() returns a Create-rule (+1) reference; adopt it so it is released once enqueued.
        auto buffer = adoptCF(sampleBufferWithNumPackets(numOutputPackets, fillBufferList));
        if (!buffer)
            return;

        attachPrimingTrimsIfNeeded(buffer.get());

        error = CMSampleBufferSetOutputPresentationTimeStamp(buffer.get(), m_currentOutputPresentationTimeStamp);
        if (error) {
            RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMSampleBufferSetOutputPresentationTimeStamp failed with %d", error);
            return;
        }

        CMTime nativeDuration = CMSampleBufferGetDuration(buffer.get());
        m_currentNativePresentationTimeStamp = CMTimeAdd(m_currentNativePresentationTimeStamp, nativeDuration);

        CMTime outputDuration = CMSampleBufferGetOutputDuration(buffer.get());
        m_currentOutputPresentationTimeStamp = CMTimeAdd(m_currentOutputPresentationTimeStamp, outputDuration);

        error = CMBufferQueueEnqueue(m_outputBufferQueue.get(), buffer.get());
        if (error) {
            RELEASE_LOG_ERROR(MediaStream, "AudioSampleBufferCompressor CMBufferQueueEnqueue failed with %d", error);
            return;
        }
    }
}

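// Enqueues an incoming buffer and converts until the input queue drops back to the low-water time.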
void AudioSampleBufferCompressor::processSampleBuffer(CMSampleBufferRef buffer)
{
    auto error = CMBufferQueueEnqueue(m_inputBufferQueue.get(), buffer);
    RELEASE_LOG_ERROR_IF(error, MediaStream, "AudioSampleBufferCompressor CMBufferQueueEnqueue failed with %d", error);

    processSampleBuffersUntilLowWaterTime(m_lowWaterTime);
}

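// Entry point for producers; hops to the serial queue and ignores buffers once encoding has finished.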
void AudioSampleBufferCompressor::addSampleBuffer(CMSampleBufferRef buffer)
{
    dispatch_sync(m_serialDispatchQueue, ^{
        if (m_isEncoding)
            processSampleBuffer(buffer);
    });
}

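// getOutputSampleBuffer() peeks at the head of the output queue without removing it;
// takeOutputSampleBuffer() dequeues the head buffer and transfers ownership to the caller.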
CMSampleBufferRef AudioSampleBufferCompressor::getOutputSampleBuffer()
{
    return (CMSampleBufferRef)(const_cast<void*>(CMBufferQueueGetHead(m_outputBufferQueue.get())));
}

RetainPtr<CMSampleBufferRef> AudioSampleBufferCompressor::takeOutputSampleBuffer()
{
    return adoptCF((CMSampleBufferRef)(const_cast<void*>(CMBufferQueueDequeueAndRetain(m_outputBufferQueue.get()))));
}

}

#endif // ENABLE(MEDIA_STREAM) && USE(AVFOUNDATION)