/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 * Copyright (C) 2013-2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "SourceBuffer.h"

#if ENABLE(MEDIA_SOURCE)
#include "AudioTrackList.h"
#include "BufferSource.h"
#include "EventNames.h"
#include "ExceptionCodePlaceholder.h"
#include "GenericEventQueue.h"
#include "HTMLMediaElement.h"
#include "InbandTextTrack.h"
#include "MediaDescription.h"
#include "MediaSample.h"
#include "MediaSource.h"
#include "SampleMap.h"
#include "SourceBufferList.h"
#include "SourceBufferPrivate.h"
#include "TextTrackList.h"
#include "TimeRanges.h"
#include "VideoTrackList.h"
#include <runtime/JSCInlines.h>
#include <runtime/JSLock.h>
#include <runtime/VM.h>
#include <wtf/CurrentTime.h>
#include <wtf/NeverDestroyed.h>
#include <wtf/text/StringBuilder.h>

namespace WebCore {

static const double ExponentialMovingAverageCoefficient = 0.1;
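// Note: with coefficient c, an exponential moving average folds each new
// measurement m into the running average a as a = c * m + (1 - c) * a, so
// older measurements decay geometrically and roughly the most recent 1/c
// of them dominate the estimate.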

struct SourceBuffer::TrackBuffer {
    MediaTime lastDecodeTimestamp;
    MediaTime lastFrameDuration;
    MediaTime highestPresentationTimestamp;
    MediaTime lastEnqueuedPresentationTime;
    MediaTime lastEnqueuedDecodeEndTime;
    bool needRandomAccessFlag { true };
    bool enabled { false };
    bool needsReenqueueing { false };
    SampleMap samples;
    DecodeOrderSampleMap::MapType decodeQueue;
    RefPtr<MediaDescription> description;
    PlatformTimeRanges buffered;

    TrackBuffer()
        : lastDecodeTimestamp(MediaTime::invalidTime())
        , lastFrameDuration(MediaTime::invalidTime())
        , highestPresentationTimestamp(MediaTime::invalidTime())
        , lastEnqueuedPresentationTime(MediaTime::invalidTime())
        , lastEnqueuedDecodeEndTime(MediaTime::invalidTime())
    {
    }
};
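
// Note: TrackBuffer keeps its samples indexed in both decode order and
// presentation order (via SampleMap) because frame reordering (e.g. B-frames)
// makes the two orders differ: coded frame removal below walks decode order so
// dependent frames are removed together with their sync frame, while buffered
// ranges are computed in presentation order.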

Ref<SourceBuffer> SourceBuffer::create(Ref<SourceBufferPrivate>&& sourceBufferPrivate, MediaSource* source)
{
    auto sourceBuffer = adoptRef(*new SourceBuffer(WTFMove(sourceBufferPrivate), source));
    sourceBuffer->suspendIfNeeded();
    return sourceBuffer;
}

SourceBuffer::SourceBuffer(Ref<SourceBufferPrivate>&& sourceBufferPrivate, MediaSource* source)
    : ActiveDOMObject(source->scriptExecutionContext())
    , m_private(WTFMove(sourceBufferPrivate))
    , m_source(source)
    , m_asyncEventQueue(*this)
    , m_appendBufferTimer(*this, &SourceBuffer::appendBufferTimerFired)
    , m_appendWindowStart(MediaTime::zeroTime())
    , m_appendWindowEnd(MediaTime::positiveInfiniteTime())
    , m_groupStartTimestamp(MediaTime::invalidTime())
    , m_groupEndTimestamp(MediaTime::zeroTime())
    , m_buffered(TimeRanges::create())
    , m_appendState(WaitingForSegment)
    , m_timeOfBufferingMonitor(monotonicallyIncreasingTime())
    , m_pendingRemoveStart(MediaTime::invalidTime())
    , m_pendingRemoveEnd(MediaTime::invalidTime())
    , m_removeTimer(*this, &SourceBuffer::removeTimerFired)
{
    m_private->setClient(this);
}

SourceBuffer::~SourceBuffer()
{
    m_private->setClient(nullptr);
}

ExceptionOr<Ref<TimeRanges>> SourceBuffer::buffered() const
{
    // Section 3.1 buffered attribute steps.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
    // INVALID_STATE_ERR exception and abort these steps.
    if (isRemoved())
        return Exception { INVALID_STATE_ERR };

    // 2. Return a new static normalized TimeRanges object for the media segments buffered.
    return m_buffered->copy();
}

double SourceBuffer::timestampOffset() const
{
    return m_timestampOffset.toDouble();
}

ExceptionOr<void> SourceBuffer::setTimestampOffset(double offset)
{
    // Section 3.1 timestampOffset attribute setter steps.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
    // 1. Let new timestamp offset equal the new value being assigned to this attribute.
    // 2. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an
    // INVALID_STATE_ERR exception and abort these steps.
    // 3. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { INVALID_STATE_ERR };

    // 4. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 4.1 Set the readyState attribute of the parent media source to "open"
    // 4.2 Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 5. If the append state equals PARSING_MEDIA_SEGMENT, then throw an INVALID_STATE_ERR and abort these steps.
    if (m_appendState == ParsingMediaSegment)
        return Exception { INVALID_STATE_ERR };

    MediaTime newTimestampOffset = MediaTime::createWithDouble(offset);

    // 6. If the mode attribute equals "sequence", then set the group start timestamp to new timestamp offset.
    if (m_mode == AppendMode::Sequence)
        m_groupStartTimestamp = newTimestampOffset;

    // 7. Update the attribute to the new value.
    m_timestampOffset = newTimestampOffset;

    return { };
}

double SourceBuffer::appendWindowStart() const
{
    return m_appendWindowStart.toDouble();
}

ExceptionOr<void> SourceBuffer::setAppendWindowStart(double newValue)
{
    // Section 3.1 appendWindowStart attribute setter steps.
    // W3C Editor's Draft 16 September 2016
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-appendwindowstart
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source,
    // then throw an InvalidStateError exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { INVALID_STATE_ERR };

    // 3. If the new value is less than 0 or greater than or equal to appendWindowEnd then
    // throw a TypeError exception and abort these steps.
    if (newValue < 0 || newValue >= m_appendWindowEnd.toDouble())
        return Exception { TypeError };

    // 4. Update the attribute to the new value.
    m_appendWindowStart = MediaTime::createWithDouble(newValue);

    return { };
}

double SourceBuffer::appendWindowEnd() const
{
    return m_appendWindowEnd.toDouble();
}

ExceptionOr<void> SourceBuffer::setAppendWindowEnd(double newValue)
{
    // Section 3.1 appendWindowEnd attribute setter steps.
    // W3C Editor's Draft 16 September 2016
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-appendwindowend
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source,
    // then throw an InvalidStateError exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { INVALID_STATE_ERR };

    // 3. If the new value equals NaN, then throw a TypeError and abort these steps.
    // 4. If the new value is less than or equal to appendWindowStart then throw a TypeError exception
    // and abort these steps.
    if (std::isnan(newValue) || newValue <= m_appendWindowStart.toDouble())
        return Exception { TypeError };

    // 5. Update the attribute to the new value.
    m_appendWindowEnd = MediaTime::createWithDouble(newValue);

    return { };
}

ExceptionOr<void> SourceBuffer::appendBuffer(const BufferSource& data)
{
    return appendBufferInternal(static_cast<const unsigned char*>(data.data), data.length);
}

void SourceBuffer::resetParserState()
{
    // Section 3.5.2 Reset Parser State algorithm steps.
    // http://www.w3.org/TR/2014/CR-media-source-20140717/#sourcebuffer-reset-parser-state
    // 1. If the append state equals PARSING_MEDIA_SEGMENT and the input buffer contains some complete coded frames,
    // then run the coded frame processing algorithm until all of these complete coded frames have been processed.
    // FIXME: If any implementation will work in pulling mode (instead of async push to SourceBufferPrivate, and forget)
    // this should be handled somehow either here, or in m_private->abort();

    // 2. Unset the last decode timestamp on all track buffers.
    // 3. Unset the last frame duration on all track buffers.
    // 4. Unset the highest presentation timestamp on all track buffers.
    // 5. Set the need random access point flag on all track buffers to true.
    for (auto& trackBufferPair : m_trackBufferMap.values()) {
        trackBufferPair.lastDecodeTimestamp = MediaTime::invalidTime();
        trackBufferPair.lastFrameDuration = MediaTime::invalidTime();
        trackBufferPair.highestPresentationTimestamp = MediaTime::invalidTime();
        trackBufferPair.needRandomAccessFlag = true;
    }

    // 6. Remove all bytes from the input buffer.
    // Note: this is handled by abortIfUpdating()
    // 7. Set append state to WAITING_FOR_SEGMENT.
    m_appendState = WaitingForSegment;

    m_private->resetParserState();
}

ExceptionOr<void> SourceBuffer::abort()
{
    // Section 3.2 abort() method steps.
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-abort
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source
    // then throw an INVALID_STATE_ERR exception and abort these steps.
    // 2. If the readyState attribute of the parent media source is not in the "open" state
    // then throw an INVALID_STATE_ERR exception and abort these steps.
    if (isRemoved() || !m_source->isOpen())
        return Exception { INVALID_STATE_ERR };

    // 3. If the range removal algorithm is running, then throw an InvalidStateError exception and abort these steps.
    if (m_removeTimer.isActive())
        return Exception { INVALID_STATE_ERR };

    // 4. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
    abortIfUpdating();

    // 5. Run the reset parser state algorithm.
    resetParserState();

    // 6. Set appendWindowStart to the presentation start time.
    m_appendWindowStart = MediaTime::zeroTime();

    // 7. Set appendWindowEnd to positive Infinity.
    m_appendWindowEnd = MediaTime::positiveInfiniteTime();

    return { };
}

ExceptionOr<void> SourceBuffer::remove(double start, double end)
{
    return remove(MediaTime::createWithDouble(start), MediaTime::createWithDouble(end));
}

ExceptionOr<void> SourceBuffer::remove(const MediaTime& start, const MediaTime& end)
{
    LOG(MediaSource, "SourceBuffer::remove(%p) - start(%lf), end(%lf)", this, start.toDouble(), end.toDouble());

    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-remove
    // Section 3.2 remove() method steps.
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw
    // an InvalidStateError exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { INVALID_STATE_ERR };

    // 3. If duration equals NaN, then throw a TypeError exception and abort these steps.
    // 4. If start is negative or greater than duration, then throw a TypeError exception and abort these steps.
    // 5. If end is less than or equal to start or end equals NaN, then throw a TypeError exception and abort these steps.
    if (m_source->duration().isInvalid()
        || end.isInvalid()
        || start.isInvalid()
        || start < MediaTime::zeroTime()
        || start > m_source->duration()
        || end <= start)
        return Exception { TypeError };

    // 6. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 6.1. Set the readyState attribute of the parent media source to "open"
    // 6.2. Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 7. Run the range removal algorithm with start and end as the start and end of the removal range.
    rangeRemoval(start, end);

    return { };
}

void SourceBuffer::rangeRemoval(const MediaTime& start, const MediaTime& end)
{
    // 3.5.7 Range Removal
    // https://rawgit.com/w3c/media-source/7bbe4aa33c61ec025bc7acbd80354110f6a000f9/media-source.html#sourcebuffer-range-removal
    // 1. Let start equal the starting presentation timestamp for the removal range.
    // 2. Let end equal the end presentation timestamp for the removal range.
    // 3. Set the updating attribute to true.
    m_updating = true;

    // 4. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
    scheduleEvent(eventNames().updatestartEvent);

    // 5. Return control to the caller and run the rest of the steps asynchronously.
    m_pendingRemoveStart = start;
    m_pendingRemoveEnd = end;
    m_removeTimer.startOneShot(0);
}
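
// Note: the zero-delay timer above is what makes steps 6-9 of the range removal
// algorithm asynchronous: remove() returns to the caller right after firing
// updatestart, and the actual coded frame removal runs on the next turn of the
// event loop in removeTimerFired() below.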

void SourceBuffer::abortIfUpdating()
{
    if (!m_updating)
        return;

    // Section 3.2 abort() method step 4 substeps.
    // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-abort

    // 4.1. Abort the buffer append algorithm if it is running.
    m_appendBufferTimer.stop();
    m_pendingAppendData.clear();

    // 4.2. Set the updating attribute to false.
    m_updating = false;

    // 4.3. Queue a task to fire a simple event named abort at this SourceBuffer object.
    scheduleEvent(eventNames().abortEvent);

    // 4.4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);
}

MediaTime SourceBuffer::highestPresentationTimestamp() const
{
    MediaTime highestTime;
    for (auto& trackBuffer : m_trackBufferMap.values()) {
        auto lastSampleIter = trackBuffer.samples.presentationOrder().rbegin();
        if (lastSampleIter == trackBuffer.samples.presentationOrder().rend())
            continue;
        highestTime = std::max(highestTime, lastSampleIter->first);
    }
    return highestTime;
}

void SourceBuffer::readyStateChanged()
{
    updateBufferedFromTrackBuffers();
}

void SourceBuffer::removedFromMediaSource()
{
    if (isRemoved())
        return;

    abortIfUpdating();

    for (auto& trackBufferPair : m_trackBufferMap.values()) {
        trackBufferPair.samples.clear();
        trackBufferPair.decodeQueue.clear();
    }

    m_private->removedFromMediaSource();
    m_source = nullptr;
}

void SourceBuffer::seekToTime(const MediaTime& time)
{
    LOG(MediaSource, "SourceBuffer::seekToTime(%p) - time(%s)", this, toString(time).utf8().data());

    for (auto& trackBufferPair : m_trackBufferMap) {
        TrackBuffer& trackBuffer = trackBufferPair.value;
        const AtomicString& trackID = trackBufferPair.key;

        trackBuffer.needsReenqueueing = true;
        reenqueueMediaForTime(trackBuffer, trackID, time);
    }
}

MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(SourceBufferPrivate*, const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
{
    MediaTime seekTime = targetTime;
    MediaTime lowerBoundTime = targetTime - negativeThreshold;
    MediaTime upperBoundTime = targetTime + positiveThreshold;

    for (auto& trackBuffer : m_trackBufferMap.values()) {
        // Find the sample which contains the target time.
        auto futureSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
        auto pastSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
        auto upperBound = trackBuffer.samples.decodeOrder().end();
        auto lowerBound = trackBuffer.samples.decodeOrder().rend();

        if (futureSyncSampleIterator == upperBound && pastSyncSampleIterator == lowerBound)
            continue;

        MediaTime futureSeekTime = MediaTime::positiveInfiniteTime();
        if (futureSyncSampleIterator != upperBound) {
            RefPtr<MediaSample>& sample = futureSyncSampleIterator->second;
            futureSeekTime = sample->presentationTime();
        }

        MediaTime pastSeekTime = MediaTime::negativeInfiniteTime();
        if (pastSyncSampleIterator != lowerBound) {
            RefPtr<MediaSample>& sample = pastSyncSampleIterator->second;
            pastSeekTime = sample->presentationTime();
        }

        MediaTime trackSeekTime = abs(targetTime - futureSeekTime) < abs(targetTime - pastSeekTime) ? futureSeekTime : pastSeekTime;
        if (abs(targetTime - trackSeekTime) > abs(targetTime - seekTime))
            seekTime = trackSeekTime;
    }

    return seekTime;
}
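
// For example, with targetTime = 10s and sync samples at 8s and 11s within the
// thresholds, the loop above picks 11s for that track (|10 - 11| < |10 - 8|);
// across tracks it keeps the candidate farthest from targetTime, so that every
// track can start decoding from a sync sample at the returned seek time.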

bool SourceBuffer::hasPendingActivity() const
{
    return m_source || m_asyncEventQueue.hasPendingEvents();
}

void SourceBuffer::stop()
{
    m_appendBufferTimer.stop();
    m_removeTimer.stop();
}

bool SourceBuffer::canSuspendForDocumentSuspension() const
{
    return !hasPendingActivity();
}

const char* SourceBuffer::activeDOMObjectName() const
{
    return "SourceBuffer";
}

bool SourceBuffer::isRemoved() const
{
    return !m_source;
}

void SourceBuffer::scheduleEvent(const AtomicString& eventName)
{
    auto event = Event::create(eventName, false, false);
    event->setTarget(this);

    m_asyncEventQueue.enqueueEvent(WTFMove(event));
}

ExceptionOr<void> SourceBuffer::appendBufferInternal(const unsigned char* data, unsigned size)
{
    // Section 3.2 appendBuffer()
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data

    // Step 1 is enforced by the caller.
    // 2. Run the prepare append algorithm.
    // Section 3.5.4 Prepare Append Algorithm
    // 1. If the SourceBuffer has been removed from the sourceBuffers attribute of the parent media source
    // then throw an INVALID_STATE_ERR exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
    if (isRemoved() || m_updating)
        return Exception { INVALID_STATE_ERR };

    // 3. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 3.1. Set the readyState attribute of the parent media source to "open"
    // 3.2. Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 4. Run the coded frame eviction algorithm.
    evictCodedFrames(size);

    // FIXME: enable this code when MSE libraries have been updated to support it.
#if USE(GSTREAMER)
    // 5. If the buffer full flag equals true, then throw a QUOTA_EXCEEDED_ERR exception and abort these steps.
    if (m_bufferFull) {
        LOG(MediaSource, "SourceBuffer::appendBufferInternal(%p) - buffer full, failing with QUOTA_EXCEEDED_ERR error", this);
        return Exception { QUOTA_EXCEEDED_ERR };
    }
#endif

    // NOTE: Return to 3.2 appendBuffer()
    // 3. Add data to the end of the input buffer.
    m_pendingAppendData.append(data, size);

    // 4. Set the updating attribute to true.
    m_updating = true;

    // 5. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
    scheduleEvent(eventNames().updatestartEvent);

    // 6. Asynchronously run the buffer append algorithm.
    m_appendBufferTimer.startOneShot(0);

    reportExtraMemoryAllocated();

    return { };
}

void SourceBuffer::appendBufferTimerFired()
{
    if (isRemoved())
        return;

    ASSERT(m_updating);

    // Section 3.5.5 Buffer Append Algorithm
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append

    // 1. Run the segment parser loop algorithm.
    size_t appendSize = m_pendingAppendData.size();
    if (!appendSize) {
        // Resize buffer for 0 byte appends so we always have a valid pointer.
        // We need to convey all appends, even 0 byte ones to |m_private| so
        // that it can clear its end of stream state if necessary.
        m_pendingAppendData.resize(1);
    }

    // Section 3.5.1 Segment Parser Loop
    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-segment-parser-loop
    // When the segment parser loop algorithm is invoked, run the following steps:

    // 1. Loop Top: If the input buffer is empty, then jump to the need more data step below.
    if (!m_pendingAppendData.size()) {
        sourceBufferPrivateAppendComplete(&m_private.get(), AppendSucceeded);
        return;
    }

    m_private->append(m_pendingAppendData.data(), appendSize);
    m_pendingAppendData.clear();
}

void SourceBuffer::sourceBufferPrivateAppendComplete(SourceBufferPrivate*, AppendResult result)
{
    if (isRemoved())
        return;

    // Resolve the changes in TrackBuffers' buffered ranges
    // into the SourceBuffer's buffered ranges.
    updateBufferedFromTrackBuffers();

    // Section 3.5.5 Buffer Append Algorithm, ctd.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append

    // 2. If the input buffer contains bytes that violate the SourceBuffer byte stream format specification,
    // then run the append error algorithm with the decode error parameter set to true and abort this algorithm.
    if (result == ParsingFailed) {
        LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - result = ParsingFailed", this);
        appendError(true);
        return;
    }

    // NOTE: Steps 3 - 6 enforced by sourceBufferPrivateDidReceiveInitializationSegment() and
    // sourceBufferPrivateDidReceiveSample below.

    // 7. Need more data: Return control to the calling algorithm.

    // NOTE: return to Section 3.5.5
    // 2. If the segment parser loop algorithm in the previous step was aborted, then abort this algorithm.
    if (result != AppendSucceeded)
        return;

    // 3. Set the updating attribute to false.
    m_updating = false;

    // 4. Queue a task to fire a simple event named update at this SourceBuffer object.
    scheduleEvent(eventNames().updateEvent);

    // 5. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);

    m_source->monitorSourceBuffers();

    MediaTime currentMediaTime = m_source->currentTime();
    for (auto& trackBufferPair : m_trackBufferMap) {
        TrackBuffer& trackBuffer = trackBufferPair.value;
        const AtomicString& trackID = trackBufferPair.key;

        if (trackBuffer.needsReenqueueing) {
            LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - reenqueuing at time (%s)", this, toString(currentMediaTime).utf8().data());
            reenqueueMediaForTime(trackBuffer, trackID, currentMediaTime);
        } else
            provideMediaData(trackBuffer, trackID);
    }

    reportExtraMemoryAllocated();
    if (extraMemoryCost() > this->maximumBufferSize())
        m_bufferFull = true;

    LOG(Media, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
}

void SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(SourceBufferPrivate*, int error)
{
#if LOG_DISABLED
    UNUSED_PARAM(error);
#endif

    LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(%p) - result = %i", this, error);

    if (!isRemoved())
        m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
}

static bool decodeTimeComparator(const PresentationOrderSampleMap::MapType::value_type& a, const PresentationOrderSampleMap::MapType::value_type& b)
{
    return a.second->decodeTime() < b.second->decodeTime();
}

static PlatformTimeRanges removeSamplesFromTrackBuffer(const DecodeOrderSampleMap::MapType& samples, SourceBuffer::TrackBuffer& trackBuffer, const SourceBuffer* buffer, const char* logPrefix)
{
#if !LOG_DISABLED
    MediaTime earliestSample = MediaTime::positiveInfiniteTime();
    MediaTime latestSample = MediaTime::zeroTime();
    size_t bytesRemoved = 0;
#else
    UNUSED_PARAM(logPrefix);
    UNUSED_PARAM(buffer);
#endif

    PlatformTimeRanges erasedRanges;
    for (auto sampleIt : samples) {
        const DecodeOrderSampleMap::KeyType& decodeKey = sampleIt.first;
#if !LOG_DISABLED
        size_t startBufferSize = trackBuffer.samples.sizeInBytes();
#endif

        RefPtr<MediaSample>& sample = sampleIt.second;
        LOG(MediaSource, "SourceBuffer::%s(%p) - removing sample(%s)", logPrefix, buffer, toString(*sampleIt.second).utf8().data());

        // Remove the erased samples from the TrackBuffer sample map.
        trackBuffer.samples.removeSample(sample.get());

        // Also remove the erased samples from the TrackBuffer decodeQueue.
        trackBuffer.decodeQueue.erase(decodeKey);

        auto startTime = sample->presentationTime();
        auto endTime = startTime + sample->duration();
        erasedRanges.add(startTime, endTime);

#if !LOG_DISABLED
        bytesRemoved += startBufferSize - trackBuffer.samples.sizeInBytes();
        if (startTime < earliestSample)
            earliestSample = startTime;
        if (endTime > latestSample)
            latestSample = endTime;
#endif
    }

    // Because we may have added artificial padding in the buffered ranges when adding samples, we may
    // need to remove that padding when removing those same samples. Walk over the erased ranges looking
    // for unbuffered areas and expand erasedRanges to encompass those areas.
    PlatformTimeRanges additionalErasedRanges;
    for (unsigned i = 0; i < erasedRanges.length(); ++i) {
        auto erasedStart = erasedRanges.start(i);
        auto erasedEnd = erasedRanges.end(i);
        auto startIterator = trackBuffer.samples.presentationOrder().reverseFindSampleBeforePresentationTime(erasedStart);
        if (startIterator == trackBuffer.samples.presentationOrder().rend())
            additionalErasedRanges.add(MediaTime::zeroTime(), erasedStart);
        else {
            auto& previousSample = *startIterator->second;
            if (previousSample.presentationTime() + previousSample.duration() < erasedStart)
                additionalErasedRanges.add(previousSample.presentationTime() + previousSample.duration(), erasedStart);
        }

        auto endIterator = trackBuffer.samples.presentationOrder().findSampleOnOrAfterPresentationTime(erasedEnd);
        if (endIterator == trackBuffer.samples.presentationOrder().end())
            additionalErasedRanges.add(erasedEnd, MediaTime::positiveInfiniteTime());
        else {
            auto& nextSample = *endIterator->second;
            if (nextSample.presentationTime() > erasedEnd)
                additionalErasedRanges.add(erasedEnd, nextSample.presentationTime());
        }
    }

    if (additionalErasedRanges.length())
        erasedRanges.unionWith(additionalErasedRanges);

#if !LOG_DISABLED
    if (bytesRemoved)
        LOG(MediaSource, "SourceBuffer::%s(%p) removed %zu bytes, start(%lf), end(%lf)", logPrefix, buffer, bytesRemoved, earliestSample.toDouble(), latestSample.toDouble());
#endif

    return erasedRanges;
}

void SourceBuffer::removeCodedFrames(const MediaTime& start, const MediaTime& end)
{
    LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - start(%s), end(%s)", this, toString(start).utf8().data(), toString(end).utf8().data());

    // 3.5.9 Coded Frame Removal Algorithm
    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-coded-frame-removal

    // 1. Let start be the starting presentation timestamp for the removal range.
    MediaTime durationMediaTime = m_source->duration();
    MediaTime currentMediaTime = m_source->currentTime();

    // 2. Let end be the end presentation timestamp for the removal range.
    // 3. For each track buffer in this source buffer, run the following steps:
    for (auto& trackBuffer : m_trackBufferMap.values()) {
        // 3.1. Let remove end timestamp be the current value of duration
        // 3.2. If this track buffer has a random access point timestamp that is greater than or equal to end, then update
        // remove end timestamp to that random access point timestamp.

        // NOTE: To handle MediaSamples which may be an amalgamation of multiple shorter samples, find samples whose presentation
        // interval straddles the start and end times, and divide them if possible:
        auto divideSampleIfPossibleAtPresentationTime = [&] (const MediaTime& time) {
            auto sampleIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);
            if (sampleIterator == trackBuffer.samples.presentationOrder().end())
                return;
            RefPtr<MediaSample> sample = sampleIterator->second;
            if (!sample->isDivisable())
                return;
            std::pair<RefPtr<MediaSample>, RefPtr<MediaSample>> replacementSamples = sample->divide(time);
            if (!replacementSamples.first || !replacementSamples.second)
                return;
            LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - splitting sample (%s) into\n\t(%s)\n\t(%s)", this,
                toString(sample).utf8().data(),
                toString(replacementSamples.first).utf8().data(),
                toString(replacementSamples.second).utf8().data());
            trackBuffer.samples.removeSample(sample.get());
            trackBuffer.samples.addSample(*replacementSamples.first);
            trackBuffer.samples.addSample(*replacementSamples.second);
        };
        divideSampleIfPossibleAtPresentationTime(start);
        divideSampleIfPossibleAtPresentationTime(end);

        // NOTE: findSyncSampleAfterPresentationTime will return the next sync sample on or after the presentation time
        // or decodeOrder().end() if no sync sample exists after that presentation time.
        DecodeOrderSampleMap::iterator removeDecodeEnd = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(end);
        PresentationOrderSampleMap::iterator removePresentationEnd;
        if (removeDecodeEnd == trackBuffer.samples.decodeOrder().end())
            removePresentationEnd = trackBuffer.samples.presentationOrder().end();
        else
            removePresentationEnd = trackBuffer.samples.presentationOrder().findSampleWithPresentationTime(removeDecodeEnd->second->presentationTime());

        PresentationOrderSampleMap::iterator removePresentationStart = trackBuffer.samples.presentationOrder().findSampleOnOrAfterPresentationTime(start);
        if (removePresentationStart == removePresentationEnd)
            continue;

        // 3.3. Remove all media data, from this track buffer, that contain starting timestamps greater than or equal to
        // start and less than the remove end timestamp.
        // NOTE: frames must be removed in decode order, so that all dependent frames between the frame to be removed
        // and the next sync sample frame are removed. But we must start from the first sample in decode order, not
        // presentation order.
        PresentationOrderSampleMap::iterator minDecodeTimeIter = std::min_element(removePresentationStart, removePresentationEnd, decodeTimeComparator);
        DecodeOrderSampleMap::KeyType decodeKey(minDecodeTimeIter->second->decodeTime(), minDecodeTimeIter->second->presentationTime());
        DecodeOrderSampleMap::iterator removeDecodeStart = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);

        DecodeOrderSampleMap::MapType erasedSamples(removeDecodeStart, removeDecodeEnd);
        PlatformTimeRanges erasedRanges = removeSamplesFromTrackBuffer(erasedSamples, trackBuffer, this, "removeCodedFrames");

        // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
        // not yet displayed samples.
        if (trackBuffer.lastEnqueuedPresentationTime.isValid() && currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
            PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
            possiblyEnqueuedRanges.intersectWith(erasedRanges);
            if (possiblyEnqueuedRanges.length())
                trackBuffer.needsReenqueueing = true;
        }

        erasedRanges.invert();
        trackBuffer.buffered.intersectWith(erasedRanges);
        setBufferedDirty(true);

        // 3.4. If this object is in activeSourceBuffers, the current playback position is greater than or equal to start
        // and less than the remove end timestamp, and HTMLMediaElement.readyState is greater than HAVE_METADATA, then set
        // the HTMLMediaElement.readyState attribute to HAVE_METADATA and stall playback.
        if (m_active && currentMediaTime >= start && currentMediaTime < end && m_private->readyState() > MediaPlayer::HaveMetadata)
            m_private->setReadyState(MediaPlayer::HaveMetadata);
    }

    updateBufferedFromTrackBuffers();

    // 4. If buffer full flag equals true and this object is ready to accept more bytes, then set the buffer full flag to false.
    if (m_bufferFull && extraMemoryCost() < maximumBufferSize())
        m_bufferFull = false;

    LOG(Media, "SourceBuffer::removeCodedFrames(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
}

void SourceBuffer::removeTimerFired()
{
    if (isRemoved())
        return;

    ASSERT(m_updating);
    ASSERT(m_pendingRemoveStart.isValid());
    ASSERT(m_pendingRemoveStart < m_pendingRemoveEnd);

    // Section 3.5.7 Range Removal
    // http://w3c.github.io/media-source/#sourcebuffer-range-removal

    // 6. Run the coded frame removal algorithm with start and end as the start and end of the removal range.
    removeCodedFrames(m_pendingRemoveStart, m_pendingRemoveEnd);

    // 7. Set the updating attribute to false.
    m_updating = false;
    m_pendingRemoveStart = MediaTime::invalidTime();
    m_pendingRemoveEnd = MediaTime::invalidTime();

    // 8. Queue a task to fire a simple event named update at this SourceBuffer object.
    scheduleEvent(eventNames().updateEvent);

    // 9. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);
}

void SourceBuffer::evictCodedFrames(size_t newDataSize)
{
    // 3.5.13 Coded Frame Eviction Algorithm
    // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-eviction

    if (isRemoved())
        return;

    // This algorithm is run to free up space in this source buffer when new data is appended.
    // 1. Let new data equal the data that is about to be appended to this SourceBuffer.
    // 2. If the buffer full flag equals false, then abort these steps.
    if (!m_bufferFull)
        return;

    size_t maximumBufferSize = this->maximumBufferSize();

    // 3. Let removal ranges equal a list of presentation time ranges that can be evicted from
    // the presentation to make room for the new data.

    // NOTE: begin by removing data from the beginning of the buffered ranges, 30 seconds at
    // a time, up to 30 seconds before currentTime.
    MediaTime thirtySeconds = MediaTime(30, 1);
    MediaTime currentTime = m_source->currentTime();
    MediaTime maximumRangeEnd = currentTime - thirtySeconds;

#if !LOG_DISABLED
    LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - currentTime = %lf, require %zu bytes, maximum buffer size is %zu", this, m_source->currentTime().toDouble(), extraMemoryCost() + newDataSize, maximumBufferSize);
    size_t initialBufferedSize = extraMemoryCost();
#endif

    MediaTime rangeStart = MediaTime::zeroTime();
    MediaTime rangeEnd = rangeStart + thirtySeconds;
    while (rangeStart < maximumRangeEnd) {
        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
        // end equal to the removal range start and end timestamp respectively.
        removeCodedFrames(rangeStart, std::min(rangeEnd, maximumRangeEnd));
        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
            m_bufferFull = false;
            break;
        }

        rangeStart += thirtySeconds;
        rangeEnd += thirtySeconds;
    }

    if (!m_bufferFull) {
        LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes", this, initialBufferedSize - extraMemoryCost());
        return;
    }

    // If there still isn't enough free space and there are buffered ranges after the current range (i.e. there is a gap after
    // the current buffered range), delete 30 seconds at a time from duration back to the current time range or 30 seconds after
    // currentTime, whichever we hit first.
    auto buffered = m_buffered->ranges();
    size_t currentTimeRange = buffered.find(currentTime);
    if (currentTimeRange == notFound || currentTimeRange == buffered.length() - 1) {
        LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes but FAILED to free enough", this, initialBufferedSize - extraMemoryCost());
        return;
    }

    MediaTime minimumRangeStart = currentTime + thirtySeconds;

    rangeEnd = m_source->duration();
    rangeStart = rangeEnd - thirtySeconds;
    while (rangeStart > minimumRangeStart) {

        // Do not evict data from the time range that contains currentTime.
        size_t startTimeRange = buffered.find(rangeStart);
        if (startTimeRange == currentTimeRange) {
            size_t endTimeRange = buffered.find(rangeEnd);
            if (endTimeRange == currentTimeRange)
                break;

            rangeEnd = buffered.start(endTimeRange);
        }

        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
        // end equal to the removal range start and end timestamp respectively.
        removeCodedFrames(std::max(minimumRangeStart, rangeStart), rangeEnd);
        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
            m_bufferFull = false;
            break;
        }

        rangeStart -= thirtySeconds;
        rangeEnd -= thirtySeconds;
    }

    LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes%s", this, initialBufferedSize - extraMemoryCost(), m_bufferFull ? "" : " but FAILED to free enough");
}

size_t SourceBuffer::maximumBufferSize() const
{
    if (isRemoved())
        return 0;

    HTMLMediaElement* element = m_source->mediaElement();
    if (!element)
        return 0;

    return element->maximumSourceBufferSize(*this);
}

VideoTrackList* SourceBuffer::videoTracks()
{
    if (!m_videoTracks)
        m_videoTracks = VideoTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());

    return m_videoTracks.get();
}

AudioTrackList* SourceBuffer::audioTracks()
{
    if (!m_audioTracks)
        m_audioTracks = AudioTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());

    return m_audioTracks.get();
}

TextTrackList* SourceBuffer::textTracks()
{
    if (!m_textTracks)
        m_textTracks = TextTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());

    return m_textTracks.get();
}

void SourceBuffer::setActive(bool active)
{
    if (m_active == active)
        return;

    m_active = active;
    m_private->setActive(active);
    if (!isRemoved())
        m_source->sourceBufferDidChangeActiveState(*this, active);
}

void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBufferPrivate*, const InitializationSegment& segment)
{
    if (isRemoved())
        return;

    LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(%p)", this);

    // 3.5.8 Initialization Segment Received (ctd)
    // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-init-segment-received [Editor's Draft 09 January 2015]

    // 1. Update the duration attribute if it currently equals NaN:
    if (m_source->duration().isInvalid()) {
        // ↳ If the initialization segment contains a duration:
        //   Run the duration change algorithm with new duration set to the duration in the initialization segment.
        // ↳ Otherwise:
        //   Run the duration change algorithm with new duration set to positive Infinity.
        MediaTime newDuration = segment.duration.isValid() ? segment.duration : MediaTime::positiveInfiniteTime();
        m_source->setDurationInternal(newDuration);
    }

    // 2. If the initialization segment has no audio, video, or text tracks, then run the append error algorithm
    // with the decode error parameter set to true and abort these steps.
    if (segment.audioTracks.isEmpty() && segment.videoTracks.isEmpty() && segment.textTracks.isEmpty()) {
        appendError(true);
        return;
    }

    // 3. If the first initialization segment flag is true, then run the following steps:
    if (m_receivedFirstInitializationSegment) {

        // 3.1. Verify the following properties. If any of the checks fail then run the append error algorithm
        // with the decode error parameter set to true and abort these steps.
        if (!validateInitializationSegment(segment)) {
            appendError(true);
            return;
        }

        // 3.2 Add the appropriate track descriptions from this initialization segment to each of the track buffers.
        ASSERT(segment.audioTracks.size() == audioTracks()->length());
        for (auto& audioTrackInfo : segment.audioTracks) {
            if (audioTracks()->length() == 1) {
                audioTracks()->item(0)->setPrivate(audioTrackInfo.track);
                break;
            }

            auto audioTrack = audioTracks()->getTrackById(audioTrackInfo.track->id());
            ASSERT(audioTrack);
            audioTrack->setPrivate(audioTrackInfo.track);
        }

        ASSERT(segment.videoTracks.size() == videoTracks()->length());
        for (auto& videoTrackInfo : segment.videoTracks) {
            if (videoTracks()->length() == 1) {
                videoTracks()->item(0)->setPrivate(videoTrackInfo.track);
                break;
            }

            auto videoTrack = videoTracks()->getTrackById(videoTrackInfo.track->id());
            ASSERT(videoTrack);
            videoTrack->setPrivate(videoTrackInfo.track);
        }

        ASSERT(segment.textTracks.size() == textTracks()->length());
        for (auto& textTrackInfo : segment.textTracks) {
            if (textTracks()->length() == 1) {
                downcast<InbandTextTrack>(*textTracks()->item(0)).setPrivate(textTrackInfo.track);
                break;
            }

            auto textTrack = textTracks()->getTrackById(textTrackInfo.track->id());
            ASSERT(textTrack);
            downcast<InbandTextTrack>(*textTrack).setPrivate(textTrackInfo.track);
        }

        // 3.3 Set the need random access point flag on all track buffers to true.
        for (auto& trackBuffer : m_trackBufferMap.values())
            trackBuffer.needRandomAccessFlag = true;
    }

    // 4. Let active track flag equal false.
    bool activeTrackFlag = false;

    // 5. If the first initialization segment flag is false, then run the following steps:
    if (!m_receivedFirstInitializationSegment) {
        // 5.1 If the initialization segment contains tracks with codecs the user agent does not support,
        // then run the append error algorithm with the decode error parameter set to true and abort these steps.
        // NOTE: This check is the responsibility of the SourceBufferPrivate.

        // 5.2 For each audio track in the initialization segment, run following steps:
        for (auto& audioTrackInfo : segment.audioTracks) {
            AudioTrackPrivate* audioTrackPrivate = audioTrackInfo.track.get();

            // FIXME: Implement steps 5.2.1-5.2.8.1 as per Editor's Draft 09 January 2015, and reorder this
            // 5.2.1 Let new audio track be a new AudioTrack object.
            // 5.2.2 Generate a unique ID and assign it to the id property on new audio track.
            auto newAudioTrack = AudioTrack::create(this, audioTrackPrivate);
            newAudioTrack->setSourceBuffer(this);

            // 5.2.3 If audioTracks.length equals 0, then run the following steps:
            if (!audioTracks()->length()) {
                // 5.2.3.1 Set the enabled property on new audio track to true.
                newAudioTrack->setEnabled(true);

                // 5.2.3.2 Set active track flag to true.
                activeTrackFlag = true;
            }

            // 5.2.4 Add new audio track to the audioTracks attribute on this SourceBuffer object.
            // 5.2.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
            // referenced by the audioTracks attribute on this SourceBuffer object.
            audioTracks()->append(newAudioTrack.copyRef());

            // 5.2.6 Add new audio track to the audioTracks attribute on the HTMLMediaElement.
            // 5.2.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
            // referenced by the audioTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->audioTracks().append(newAudioTrack.copyRef());

            // 5.2.8 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(newAudioTrack->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value;

            // 5.2.9 Add the track description for this track to the track buffer.
            trackBuffer.description = audioTrackInfo.description;

            m_audioCodecs.append(trackBuffer.description->codec());
        }

        // 5.3 For each video track in the initialization segment, run following steps:
        for (auto& videoTrackInfo : segment.videoTracks) {
            VideoTrackPrivate* videoTrackPrivate = videoTrackInfo.track.get();

            // FIXME: Implement steps 5.3.1-5.3.8.1 as per Editor's Draft 09 January 2015, and reorder this
            // 5.3.1 Let new video track be a new VideoTrack object.
            // 5.3.2 Generate a unique ID and assign it to the id property on new video track.
            auto newVideoTrack = VideoTrack::create(this, videoTrackPrivate);
            newVideoTrack->setSourceBuffer(this);

            // 5.3.3 If videoTracks.length equals 0, then run the following steps:
            if (!videoTracks()->length()) {
                // 5.3.3.1 Set the selected property on new video track to true.
                newVideoTrack->setSelected(true);

                // 5.3.3.2 Set active track flag to true.
                activeTrackFlag = true;
            }

            // 5.3.4 Add new video track to the videoTracks attribute on this SourceBuffer object.
            // 5.3.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
            // referenced by the videoTracks attribute on this SourceBuffer object.
            videoTracks()->append(newVideoTrack.copyRef());

            // 5.3.6 Add new video track to the videoTracks attribute on the HTMLMediaElement.
            // 5.3.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
            // referenced by the videoTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->videoTracks().append(newVideoTrack.copyRef());

            // 5.3.8 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(newVideoTrack->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value;

            // 5.3.9 Add the track description for this track to the track buffer.
            trackBuffer.description = videoTrackInfo.description;

            m_videoCodecs.append(trackBuffer.description->codec());
        }

        // 5.4 For each text track in the initialization segment, run following steps:
        for (auto& textTrackInfo : segment.textTracks) {
            InbandTextTrackPrivate* textTrackPrivate = textTrackInfo.track.get();

            // FIXME: Implement steps 5.4.1-5.4.8.1 as per Editor's Draft 09 January 2015, and reorder this
            // 5.4.1 Let new text track be a new TextTrack object with its properties populated with the
            // appropriate information from the initialization segment.
            RefPtr<InbandTextTrack> newTextTrack = InbandTextTrack::create(scriptExecutionContext(), this, textTrackPrivate);

            // 5.4.2 If the mode property on new text track equals "showing" or "hidden", then set active
            // track flag to true.
            if (textTrackPrivate->mode() != InbandTextTrackPrivate::Disabled)
                activeTrackFlag = true;

            // 5.4.3 Add new text track to the textTracks attribute on this SourceBuffer object.
            // 5.4.4 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at textTracks attribute on this
            // SourceBuffer object.
            textTracks()->append(*newTextTrack);

            // 5.4.5 Add new text track to the textTracks attribute on the HTMLMediaElement.
            // 5.4.6 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the TextTrackList object
            // referenced by the textTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->textTracks().append(newTextTrack.releaseNonNull());

            // 5.4.7 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(textTrackPrivate->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(textTrackPrivate->id(), TrackBuffer()).iterator->value;

            // 5.4.8 Add the track description for this track to the track buffer.
            trackBuffer.description = textTrackInfo.description;

            m_textCodecs.append(trackBuffer.description->codec());
        }

        // 5.5 If active track flag equals true, then run the following steps:
        if (activeTrackFlag) {
            // 5.5.1 Add this SourceBuffer to activeSourceBuffers.
            // 5.5.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
            setActive(true);
        }

        // 5.6 Set first initialization segment flag to true.
        m_receivedFirstInitializationSegment = true;
    }

    // 6. If the HTMLMediaElement.readyState attribute is HAVE_NOTHING, then run the following steps:
    if (m_private->readyState() == MediaPlayer::HaveNothing) {
        // 6.1 If one or more objects in sourceBuffers have first initialization segment flag set to false, then abort these steps.
        for (auto& sourceBuffer : *m_source->sourceBuffers()) {
            if (!sourceBuffer->m_receivedFirstInitializationSegment)
                return;
        }

        // 6.2 Set the HTMLMediaElement.readyState attribute to HAVE_METADATA.
        // 6.3 Queue a task to fire a simple event named loadedmetadata at the media element.
        m_private->setReadyState(MediaPlayer::HaveMetadata);
    }

    // 7. If the active track flag equals true and the HTMLMediaElement.readyState
    // attribute is greater than HAVE_CURRENT_DATA, then set the HTMLMediaElement.readyState
    // attribute to HAVE_METADATA.
    if (activeTrackFlag && m_private->readyState() > MediaPlayer::HaveCurrentData)
        m_private->setReadyState(MediaPlayer::HaveMetadata);
}

bool SourceBuffer::validateInitializationSegment(const InitializationSegment& segment)
{
    // FIXME: ordering of all 3.5.X (X>=7) functions needs to be updated to post-[24 July 2014 Editor's Draft] version
    // 3.5.8 Initialization Segment Received (ctd)
    // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-init-segment-received [Editor's Draft 09 January 2015]

    // Note: those are checks from step 3.1
    // * The number of audio, video, and text tracks match what was in the first initialization segment.
    if (segment.audioTracks.size() != audioTracks()->length()
        || segment.videoTracks.size() != videoTracks()->length()
        || segment.textTracks.size() != textTracks()->length())
        return false;

    // * The codecs for each track, match what was specified in the first initialization segment.
    for (auto& audioTrackInfo : segment.audioTracks) {
        if (!m_audioCodecs.contains(audioTrackInfo.description->codec()))
            return false;
    }

    for (auto& videoTrackInfo : segment.videoTracks) {
        if (!m_videoCodecs.contains(videoTrackInfo.description->codec()))
            return false;
    }

    for (auto& textTrackInfo : segment.textTracks) {
        if (!m_textCodecs.contains(textTrackInfo.description->codec()))
            return false;
    }

    // * If more than one track for a single type are present (i.e. 2 audio tracks), then the Track
    // IDs match the ones in the first initialization segment.
    if (segment.audioTracks.size() >= 2) {
        for (auto& audioTrackInfo : segment.audioTracks) {
            if (!m_trackBufferMap.contains(audioTrackInfo.track->id()))
                return false;
        }
    }

    if (segment.videoTracks.size() >= 2) {
        for (auto& videoTrackInfo : segment.videoTracks) {
            if (!m_trackBufferMap.contains(videoTrackInfo.track->id()))
                return false;
        }
    }

    if (segment.textTracks.size() >= 2) {
        for (auto& textTrackInfo : segment.textTracks) {
            if (!m_trackBufferMap.contains(textTrackInfo.track->id()))
                return false;
        }
    }

    return true;
}

class SampleLessThanComparator {
public:
    bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
    {
        return value1.first < value2.first;
    }

    bool operator()(MediaTime value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
    {
        return value1 < value2.first;
    }

    bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, MediaTime value2)
    {
        return value1.first < value2;
    }
};
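
// Note: the three operator() overloads above let standard algorithms such as
// std::lower_bound and std::equal_range compare a bare MediaTime key against the
// (MediaTime, MediaSample) pairs stored in the sample maps, in either argument
// order, without first constructing a dummy pair.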

void SourceBuffer::appendError(bool decodeErrorParam)
{
    // 3.5.3 Append Error Algorithm
    // https://rawgit.com/w3c/media-source/c3ad59c7a370d04430969ba73d18dc9bcde57a33/index.html#sourcebuffer-append-error [Editor's Draft 09 January 2015]

    ASSERT(m_updating);
    // 1. Run the reset parser state algorithm.
    resetParserState();

    // 2. Set the updating attribute to false.
    m_updating = false;

    // 3. Queue a task to fire a simple event named error at this SourceBuffer object.
    scheduleEvent(eventNames().errorEvent);

    // 4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);

    // 5. If decode error is true, then run the end of stream algorithm with the error parameter set to "decode".
    if (decodeErrorParam)
        m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
}
1322 void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, MediaSample& sample)
1327 // 3.5.1 Segment Parser Loop
1328 // 6.1 If the first initialization segment received flag is false, then run the append error algorithm
1329 // with the decode error parameter set to true and abort this algorithm.
1330 // Note: current design makes SourceBuffer somehow ignorant of append state - it's more a thing
1331 // of SourceBufferPrivate. That's why this check can't really be done in appendInternal.
1332 // unless we force some kind of design with state machine switching.
1333 if (!m_receivedFirstInitializationSegment) {
1338 // 3.5.8 Coded Frame Processing
1339 // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-processing
1341 // When complete coded frames have been parsed by the segment parser loop then the following steps
1343 // 1. For each coded frame in the media segment run the following steps:
1346 MediaTime presentationTimestamp;
1347 MediaTime decodeTimestamp;
1349 if (m_shouldGenerateTimestamps) {
1350 // ↳ If generate timestamps flag equals true:
1351 // 1. Let presentation timestamp equal 0.
1352 presentationTimestamp = MediaTime::zeroTime();
1354 // 2. Let decode timestamp equal 0.
1355 decodeTimestamp = MediaTime::zeroTime();
1358 // 1. Let presentation timestamp be a double precision floating point representation of
1359 // the coded frame's presentation timestamp in seconds.
1360 presentationTimestamp = sample.presentationTime();
1362 // 2. Let decode timestamp be a double precision floating point representation of the coded frame's
1363 // decode timestamp in seconds.
1364 decodeTimestamp = sample.decodeTime();
1367 // 1.2 Let frame duration be a double precision floating point representation of the coded frame's
1368 // duration in seconds.
1369 MediaTime frameDuration = sample.duration();
1371 // 1.3 If mode equals "sequence" and group start timestamp is set, then run the following steps:
1372 if (m_mode == AppendMode::Sequence && m_groupStartTimestamp.isValid()) {
1373 // 1.3.1 Set timestampOffset equal to group start timestamp - presentation timestamp.
1374 m_timestampOffset = m_groupStartTimestamp;
1376 // 1.3.2 Set group end timestamp equal to group start timestamp.
1377 m_groupEndTimestamp = m_groupStartTimestamp;
1379 // 1.3.3 Set the need random access point flag on all track buffers to true.
1380 for (auto& trackBuffer : m_trackBufferMap.values())
1381 trackBuffer.needRandomAccessFlag = true;
1383 // 1.3.4 Unset group start timestamp.
1384 m_groupStartTimestamp = MediaTime::invalidTime();
1387 // 1.4 If timestampOffset is not 0, then run the following steps:
1388 if (m_timestampOffset) {
1389 // 1.4.1 Add timestampOffset to the presentation timestamp.
1390 presentationTimestamp += m_timestampOffset;
1392 // 1.4.2 Add timestampOffset to the decode timestamp.
1393 decodeTimestamp += m_timestampOffset;
1396 // 1.5 Let track buffer equal the track buffer that the coded frame will be added to.
1397 AtomicString trackID = sample.trackID();
1398 auto it = m_trackBufferMap.find(trackID);
1399 if (it == m_trackBufferMap.end())
1400 it = m_trackBufferMap.add(trackID, TrackBuffer()).iterator;
1401 TrackBuffer& trackBuffer = it->value;
1403 // 1.6 ↳ If last decode timestamp for track buffer is set and decode timestamp is less than last
1404 // decode timestamp:
1406 // ↳ If last decode timestamp for track buffer is set and the difference between decode timestamp and
1407 // last decode timestamp is greater than 2 times last frame duration:
1408 if (trackBuffer.lastDecodeTimestamp.isValid() && (decodeTimestamp < trackBuffer.lastDecodeTimestamp
1409 || abs(decodeTimestamp - trackBuffer.lastDecodeTimestamp) > (trackBuffer.lastFrameDuration * 2))) {
1412 if (m_mode == AppendMode::Segments) {
1413 // ↳ If mode equals "segments":
1414 // Set group end timestamp to presentation timestamp.
1415 m_groupEndTimestamp = presentationTimestamp;
1417 // ↳ If mode equals "sequence":
1418 // Set group start timestamp equal to the group end timestamp.
1419 m_groupStartTimestamp = m_groupEndTimestamp;
1422 for (auto& trackBuffer : m_trackBufferMap.values()) {
1423 // 1.6.2 Unset the last decode timestamp on all track buffers.
1424 trackBuffer.lastDecodeTimestamp = MediaTime::invalidTime();
1425 // 1.6.3 Unset the last frame duration on all track buffers.
1426 trackBuffer.lastFrameDuration = MediaTime::invalidTime();
1427 // 1.6.4 Unset the highest presentation timestamp on all track buffers.
1428 trackBuffer.highestPresentationTimestamp = MediaTime::invalidTime();
1429 // 1.6.5 Set the need random access point flag on all track buffers to true.
    trackBuffer.needRandomAccessFlag = true;
}

// 1.6.6 Jump to the Loop Top step above to restart processing of the current coded frame.
continue;
}
1437 if (m_mode == AppendMode::Sequence) {
1438 // Use the generated timestamps instead of the sample's timestamps.
1439 sample.setTimestamps(presentationTimestamp, decodeTimestamp);
1440 } else if (m_timestampOffset) {
1441 // Reflect the timestamp offset into the sample.
    sample.offsetTimestampsBy(m_timestampOffset);
}
1445 // 1.7 Let frame end timestamp equal the sum of presentation timestamp and frame duration.
1446 MediaTime frameEndTimestamp = presentationTimestamp + frameDuration;
1448 // 1.8 If presentation timestamp is less than appendWindowStart, then set the need random access
1449 // point flag to true, drop the coded frame, and jump to the top of the loop to start processing
1450 // the next coded frame.
1451 // 1.9 If frame end timestamp is greater than appendWindowEnd, then set the need random access
1452 // point flag to true, drop the coded frame, and jump to the top of the loop to start processing
1453 // the next coded frame.
1454 if (presentationTimestamp < m_appendWindowStart || frameEndTimestamp > m_appendWindowEnd) {
    trackBuffer.needRandomAccessFlag = true;
    didDropSample();
    return;
}
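// NOTE (worked example with assumed values): appendWindowStart defaults to 0 and
// appendWindowEnd to +infinity. If a client sets appendWindowStart to 10s, a 30ms frame at
// PTS 9.98s (frame end timestamp 10.01s) still has presentationTimestamp < 10s, so it is
// dropped here and the track waits for the next random access point.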
1461 // 1.10 If the decode timestamp is less than the presentation start time, then run the end of stream
1462 // algorithm with the error parameter set to "decode", and abort these steps.
1463 // NOTE: Until <https://www.w3.org/Bugs/Public/show_bug.cgi?id=27487> is resolved, we will only check
1464 // the presentation timestamp.
1465 MediaTime presentationStartTime = MediaTime::zeroTime();
1466 if (presentationTimestamp < presentationStartTime) {
1467 LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveSample(%p) - failing because presentationTimestamp < presentationStartTime", this);
    m_source->streamEndedWithError(MediaSource::EndOfStreamError::Decode);
    return;
}
1472 // 1.11 If the need random access point flag on track buffer equals true, then run the following steps:
1473 if (trackBuffer.needRandomAccessFlag) {
1474 // 1.11.1 If the coded frame is not a random access point, then drop the coded frame and jump
1475 // to the top of the loop to start processing the next coded frame.
if (!sample.isSync()) {
    didDropSample();
    return;
}
1481 // 1.11.2 Set the need random access point flag on track buffer to false.
    trackBuffer.needRandomAccessFlag = false;
}
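// NOTE (illustrative): after a discontinuity or an append-window drop, every non-sync frame
// (e.g. a P-frame whose reference keyframe will never be enqueued) is discarded above until
// the next sync sample arrives, since it could not be decoded correctly anyway.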
1485 // 1.12 Let spliced audio frame be an unset variable for holding audio splice information
1486 // 1.13 Let spliced timed text frame be an unset variable for holding timed text splice information
1487 // FIXME: Add support for sample splicing.
1489 SampleMap erasedSamples;
1490 MediaTime microsecond(1, 1000000);
// 1.14 If last decode timestamp for track buffer is unset and presentation timestamp
// falls within the presentation interval of a coded frame in track buffer, then run the
// following steps:
1495 if (trackBuffer.lastDecodeTimestamp.isInvalid()) {
1496 auto iter = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(presentationTimestamp);
1497 if (iter != trackBuffer.samples.presentationOrder().end()) {
1498 // 1.14.1 Let overlapped frame be the coded frame in track buffer that matches the condition above.
1499 RefPtr<MediaSample> overlappedFrame = iter->second;
1501 // 1.14.2 If track buffer contains audio coded frames:
1502 // Run the audio splice frame algorithm and if a splice frame is returned, assign it to
1503 // spliced audio frame.
1504 // FIXME: Add support for sample splicing.
1506 // If track buffer contains video coded frames:
1507 if (trackBuffer.description->isVideo()) {
1508 // 1.14.2.1 Let overlapped frame presentation timestamp equal the presentation timestamp
1509 // of overlapped frame.
1510 MediaTime overlappedFramePresentationTimestamp = overlappedFrame->presentationTime();
1512 // 1.14.2.2 Let remove window timestamp equal overlapped frame presentation timestamp
1513 // plus 1 microsecond.
1514 MediaTime removeWindowTimestamp = overlappedFramePresentationTimestamp + microsecond;
1516 // 1.14.2.3 If the presentation timestamp is less than the remove window timestamp,
1517 // then remove overlapped frame and any coded frames that depend on it from track buffer.
1518 if (presentationTimestamp < removeWindowTimestamp)
    erasedSamples.addSample(*iter->second);
}
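// NOTE (worked example with assumed values): if an existing video frame occupies
// [2.0s, 2.033s) and a new frame is appended at PTS 2.0s, the new PTS falls inside the old
// frame's presentation interval and is below its start + 1µs, so the old frame (and, via
// step 1.16, any frames depending on it) is marked for removal before the new frame lands.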
1522 // If track buffer contains timed text coded frames:
1523 // Run the text splice frame algorithm and if a splice frame is returned, assign it to spliced timed text frame.
// FIXME: Add support for sample splicing.
}
}
1528 // 1.15 Remove existing coded frames in track buffer:
1529 // If highest presentation timestamp for track buffer is not set:
1530 if (trackBuffer.highestPresentationTimestamp.isInvalid()) {
1531 // Remove all coded frames from track buffer that have a presentation timestamp greater than or
1532 // equal to presentation timestamp and less than frame end timestamp.
1533 auto iter_pair = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp);
1534 if (iter_pair.first != trackBuffer.samples.presentationOrder().end())
    erasedSamples.addRange(iter_pair.first, iter_pair.second);
}
1538 // If highest presentation timestamp for track buffer is set and less than or equal to presentation timestamp
1539 if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp <= presentationTimestamp) {
1540 // Remove all coded frames from track buffer that have a presentation timestamp greater than highest
// presentation timestamp and less than or equal to frame end timestamp.
do {
1543 // NOTE: Searching from the end of the trackBuffer will be vastly more efficient if the search range is
1544 // near the end of the buffered range. Use a linear-backwards search if the search range is within one
1545 // frame duration of the end:
1546 unsigned bufferedLength = trackBuffer.buffered.length();
if (!bufferedLength)
    break;
1550 MediaTime highestBufferedTime = trackBuffer.buffered.maximumBufferedTime();
1552 PresentationOrderSampleMap::iterator_range range;
1553 if (highestBufferedTime - trackBuffer.highestPresentationTimestamp < trackBuffer.lastFrameDuration)
1554 range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRangeFromEnd(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
else
    range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRange(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
1558 if (range.first != trackBuffer.samples.presentationOrder().end())
    erasedSamples.addRange(range.first, range.second);
} while (false);
}
1563 // 1.16 Remove decoding dependencies of the coded frames removed in the previous step:
1564 DecodeOrderSampleMap::MapType dependentSamples;
1565 if (!erasedSamples.empty()) {
1566 // If detailed information about decoding dependencies is available:
1567 // FIXME: Add support for detailed dependency information
1569 // Otherwise: Remove all coded frames between the coded frames removed in the previous step
1570 // and the next random access point after those removed frames.
1571 auto firstDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().begin()->first);
1572 auto lastDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().rbegin()->first);
1573 auto nextSyncIter = trackBuffer.samples.decodeOrder().findSyncSampleAfterDecodeIterator(lastDecodeIter);
1574 dependentSamples.insert(firstDecodeIter, nextSyncIter);
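// NOTE (worked example, frame numbers assumed): erasing frames with decode order 5-7 also
// pulls in frames 8 and 9 when the next sync sample is frame 10, because without frame 7
// (their reference) frames 8 and 9 could no longer be decoded correctly.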
1576 PlatformTimeRanges erasedRanges = removeSamplesFromTrackBuffer(dependentSamples, trackBuffer, this, "sourceBufferPrivateDidReceiveSample");
1578 // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
1579 // not yet displayed samples.
1580 MediaTime currentMediaTime = m_source->currentTime();
1581 if (currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
1582 PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
1583 possiblyEnqueuedRanges.intersectWith(erasedRanges);
1584 if (possiblyEnqueuedRanges.length())
    trackBuffer.needsReenqueueing = true;
}
1588 erasedRanges.invert();
1589 trackBuffer.buffered.intersectWith(erasedRanges);
    setBufferedDirty(true);
}
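// NOTE (worked example with assumed values): if trackBuffer.buffered was [0s, 10s) and
// erasedRanges was [2s, 4s), inverting gives (-inf, 2s) U [4s, +inf), and intersecting
// leaves buffered == [0s, 2s) U [4s, 10s).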
1593 // 1.17 If spliced audio frame is set:
1594 // Add spliced audio frame to the track buffer.
1595 // If spliced timed text frame is set:
1596 // Add spliced timed text frame to the track buffer.
1597 // FIXME: Add support for sample splicing.
1600 // Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
1601 trackBuffer.samples.addSample(sample);
1603 if (trackBuffer.lastEnqueuedDecodeEndTime.isInvalid() || decodeTimestamp >= trackBuffer.lastEnqueuedDecodeEndTime) {
1604 DecodeOrderSampleMap::KeyType decodeKey(decodeTimestamp, presentationTimestamp);
    trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, &sample));
}
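// NOTE (illustrative): the decode queue key is a (decodeTimestamp, presentationTimestamp)
// pair, so samples are kept in decode order with PTS as a tie-breaker; a sample whose DTS
// precedes the last enqueued decode end time is deliberately left out of the queue above.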
1608 // 1.18 Set last decode timestamp for track buffer to decode timestamp.
1609 trackBuffer.lastDecodeTimestamp = decodeTimestamp;
1611 // 1.19 Set last frame duration for track buffer to frame duration.
1612 trackBuffer.lastFrameDuration = frameDuration;
1614 // 1.20 If highest presentation timestamp for track buffer is unset or frame end timestamp is greater
1615 // than highest presentation timestamp, then set highest presentation timestamp for track buffer
1616 // to frame end timestamp.
1617 if (trackBuffer.highestPresentationTimestamp.isInvalid() || frameEndTimestamp > trackBuffer.highestPresentationTimestamp)
1618 trackBuffer.highestPresentationTimestamp = frameEndTimestamp;
1620 // 1.21 If frame end timestamp is greater than group end timestamp, then set group end timestamp equal
1621 // to frame end timestamp.
1622 if (m_groupEndTimestamp.isInvalid() || frameEndTimestamp > m_groupEndTimestamp)
1623 m_groupEndTimestamp = frameEndTimestamp;
1625 // 1.22 If generate timestamps flag equals true, then set timestampOffset equal to frame end timestamp.
1626 if (m_shouldGenerateTimestamps)
1627 m_timestampOffset = frameEndTimestamp;
1629 // Eliminate small gaps between buffered ranges by coalescing
1630 // disjoint ranges separated by less than a "fudge factor".
1631 auto presentationEndTime = presentationTimestamp + frameDuration;
1632 auto nearestToPresentationStartTime = trackBuffer.buffered.nearest(presentationTimestamp);
1633 if (nearestToPresentationStartTime.isValid() && (presentationTimestamp - nearestToPresentationStartTime).isBetween(MediaTime::zeroTime(), MediaSource::currentTimeFudgeFactor()))
1634 presentationTimestamp = nearestToPresentationStartTime;
1636 auto nearestToPresentationEndTime = trackBuffer.buffered.nearest(presentationEndTime);
if (nearestToPresentationEndTime.isValid() && (nearestToPresentationEndTime - presentationEndTime).isBetween(MediaTime::zeroTime(), MediaSource::currentTimeFudgeFactor()))
1638 presentationEndTime = nearestToPresentationEndTime;
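// NOTE (worked example with assumed values): if the track is already buffered up to 5.00s and
// the new frame spans [5.02s, 5.05s), and the fudge factor is a few tens of milliseconds, the
// 20ms gap is swallowed by snapping presentationTimestamp back to 5.00s, keeping buffered a
// single contiguous range instead of accumulating micro-gaps.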
1640 trackBuffer.buffered.add(presentationTimestamp, presentationEndTime);
1641 m_bufferedSinceLastMonitor += frameDuration.toDouble();
setBufferedDirty(true);

break;
} while (1);
1647 // Steps 2-4 will be handled by MediaSource::monitorSourceBuffers()
1649 // 5. If the media segment contains data beyond the current duration, then run the duration change algorithm with new
1650 // duration set to the maximum of the current duration and the group end timestamp.
1651 if (m_groupEndTimestamp > m_source->duration())
    m_source->setDurationInternal(m_groupEndTimestamp);
}
bool SourceBuffer::hasAudio() const
{
    return m_audioTracks && m_audioTracks->length();
}

bool SourceBuffer::hasVideo() const
{
    return m_videoTracks && m_videoTracks->length();
}
bool SourceBuffer::sourceBufferPrivateHasAudio(const SourceBufferPrivate*) const
{
    return hasAudio();
}

bool SourceBuffer::sourceBufferPrivateHasVideo(const SourceBufferPrivate*) const
{
    return hasVideo();
}
void SourceBuffer::videoTrackSelectedChanged(VideoTrack* track)
{
1677 // 2.4.5 Changes to selected/enabled track state
1678 // If the selected video track changes, then run the following steps:
1679 // 1. If the SourceBuffer associated with the previously selected video track is not associated with
1680 // any other enabled tracks, run the following steps:
1681 if (!track->selected()
1682 && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1683 && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1684 && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1685 // 1.1 Remove the SourceBuffer from activeSourceBuffers.
// 1.2 Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
setActive(false);
} else if (track->selected()) {
1689 // 2. If the SourceBuffer associated with the newly selected video track is not already in activeSourceBuffers,
1690 // run the following steps:
1691 // 2.1 Add the SourceBuffer to activeSourceBuffers.
// 2.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
setActive(true);
}
1696 if (m_videoTracks && m_videoTracks->contains(*track))
1697 m_videoTracks->scheduleChangeEvent();
if (!isRemoved())
    m_source->mediaElement()->videoTrackSelectedChanged(track);
}
void SourceBuffer::audioTrackEnabledChanged(AudioTrack* track)
{
1705 // 2.4.5 Changes to selected/enabled track state
1706 // If an audio track becomes disabled and the SourceBuffer associated with this track is not
1707 // associated with any other enabled or selected track, then run the following steps:
1708 if (!track->enabled()
1709 && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1710 && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1711 && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1712 // 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers
// 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
setActive(false);
} else if (track->enabled()) {
1716 // If an audio track becomes enabled and the SourceBuffer associated with this track is
1717 // not already in activeSourceBuffers, then run the following steps:
1718 // 1. Add the SourceBuffer associated with the audio track to activeSourceBuffers
// 2. Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
setActive(true);
}
1723 if (m_audioTracks && m_audioTracks->contains(*track))
1724 m_audioTracks->scheduleChangeEvent();
if (!isRemoved())
    m_source->mediaElement()->audioTrackEnabledChanged(track);
}
void SourceBuffer::textTrackModeChanged(TextTrack* track)
{
1732 // 2.4.5 Changes to selected/enabled track state
1733 // If a text track mode becomes "disabled" and the SourceBuffer associated with this track is not
1734 // associated with any other enabled or selected track, then run the following steps:
1735 if (track->mode() == TextTrack::Mode::Disabled
1736 && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1737 && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1738 && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1739 // 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers
// 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
setActive(false);
} else {
1743 // If a text track mode becomes "showing" or "hidden" and the SourceBuffer associated with this
1744 // track is not already in activeSourceBuffers, then run the following steps:
1745 // 1. Add the SourceBuffer associated with the text track to activeSourceBuffers
// 2. Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
setActive(true);
}
1750 if (m_textTracks && m_textTracks->contains(*track))
1751 m_textTracks->scheduleChangeEvent();
if (!isRemoved())
    m_source->mediaElement()->textTrackModeChanged(track);
}
void SourceBuffer::textTrackAddCue(TextTrack* track, TextTrackCue& cue)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackAddCue(track, cue);
}

void SourceBuffer::textTrackAddCues(TextTrack* track, TextTrackCueList const* cueList)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackAddCues(track, cueList);
}

void SourceBuffer::textTrackRemoveCue(TextTrack* track, TextTrackCue& cue)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackRemoveCue(track, cue);
}

void SourceBuffer::textTrackRemoveCues(TextTrack* track, TextTrackCueList const* cueList)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackRemoveCues(track, cueList);
}

void SourceBuffer::textTrackKindChanged(TextTrack* track)
{
    if (!isRemoved())
        m_source->mediaElement()->textTrackKindChanged(track);
}
void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(SourceBufferPrivate*, AtomicString trackID)
{
1789 LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(%p)", this);
1790 auto it = m_trackBufferMap.find(trackID);
if (it == m_trackBufferMap.end())
    return;
1794 TrackBuffer& trackBuffer = it->value;
1795 if (!trackBuffer.needsReenqueueing && !m_source->isSeeking())
    provideMediaData(trackBuffer, trackID);
}
void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, AtomicString trackID)
{
#if !LOG_DISABLED
    unsigned enqueuedSamples = 0;
#endif
1805 while (!trackBuffer.decodeQueue.empty()) {
1806 if (!m_private->isReadyForMoreSamples(trackID)) {
    m_private->notifyClientWhenReadyForMoreSamples(trackID);
    break;
}
1811 // FIXME(rdar://problem/20635969): Remove this re-entrancy protection when the aforementioned radar is resolved; protecting
// against re-entrancy introduces a small inefficiency when removing appended samples from the decode queue one at a time
1813 // rather than when all samples have been enqueued.
1814 auto sample = trackBuffer.decodeQueue.begin()->second;
1815 trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin());
1817 // Do not enqueue samples spanning a significant unbuffered gap.
1818 // NOTE: one second is somewhat arbitrary. MediaSource::monitorSourceBuffers() is run
1819 // on the playbackTimer, which is effectively every 350ms. Allowing > 350ms gap between
1820 // enqueued samples allows for situations where we overrun the end of a buffered range
// but don't notice for 350ms of playback time, and the client can enqueue data for the
1822 // new current time without triggering this early return.
1823 // FIXME(135867): Make this gap detection logic less arbitrary.
1824 MediaTime oneSecond(1, 1);
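// NOTE (worked example with assumed values): if the last enqueued sample's decode interval
// ended at 4.0s and the next queued sample has DTS 5.5s, the 1.5s gap exceeds oneSecond, so
// enqueueing stops here rather than feeding the decoder across unbuffered media.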
if (trackBuffer.lastEnqueuedDecodeEndTime.isValid() && sample->decodeTime() - trackBuffer.lastEnqueuedDecodeEndTime > oneSecond)
    break;
1828 trackBuffer.lastEnqueuedPresentationTime = sample->presentationTime();
1829 trackBuffer.lastEnqueuedDecodeEndTime = sample->decodeTime() + sample->duration();
    m_private->enqueueSample(WTFMove(sample), trackID);
#if !LOG_DISABLED
    ++enqueuedSamples;
#endif
}

LOG(MediaSource, "SourceBuffer::provideMediaData(%p) - Enqueued %u samples", this, enqueuedSamples);
}
void SourceBuffer::reenqueueMediaForTime(TrackBuffer& trackBuffer, AtomicString trackID, const MediaTime& time)
{
1841 // Find the sample which contains the current presentation time.
1842 auto currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);
1844 if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end())
1845 currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleOnOrAfterPresentationTime(time);
1847 if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end()
1848 || (currentSamplePTSIterator->first - time) > MediaSource::currentTimeFudgeFactor()) {
1849 trackBuffer.decodeQueue.clear();
    m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
    return;
}
// Search backward for the previous sync sample.
1855 DecodeOrderSampleMap::KeyType decodeKey(currentSamplePTSIterator->second->decodeTime(), currentSamplePTSIterator->second->presentationTime());
1856 auto currentSampleDTSIterator = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
1857 ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeOrder().end());
1859 auto reverseCurrentSampleIter = --DecodeOrderSampleMap::reverse_iterator(currentSampleDTSIterator);
1860 auto reverseLastSyncSampleIter = trackBuffer.samples.decodeOrder().findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter);
1861 if (reverseLastSyncSampleIter == trackBuffer.samples.decodeOrder().rend()) {
1862 trackBuffer.decodeQueue.clear();
    m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
    return;
}
1867 Vector<RefPtr<MediaSample>> nonDisplayingSamples;
1868 for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter)
1869 nonDisplayingSamples.append(iter->second);
1871 m_private->flushAndEnqueueNonDisplayingSamples(nonDisplayingSamples, trackID);
1873 if (!nonDisplayingSamples.isEmpty()) {
1874 trackBuffer.lastEnqueuedPresentationTime = nonDisplayingSamples.last()->presentationTime();
1875 trackBuffer.lastEnqueuedDecodeEndTime = nonDisplayingSamples.last()->decodeTime();
} else {
    trackBuffer.lastEnqueuedPresentationTime = MediaTime::invalidTime();
    trackBuffer.lastEnqueuedDecodeEndTime = MediaTime::invalidTime();
}
1881 // Fill the decode queue with the remaining samples.
1882 trackBuffer.decodeQueue.clear();
1883 for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeOrder().end(); ++iter)
1884 trackBuffer.decodeQueue.insert(*iter);
1885 provideMediaData(trackBuffer, trackID);
trackBuffer.needsReenqueueing = false;
}
void SourceBuffer::didDropSample()
{
    if (!isRemoved())
        m_source->mediaElement()->incrementDroppedFrameCount();
}
void SourceBuffer::monitorBufferingRate()
{
1899 double now = monotonicallyIncreasingTime();
1900 double interval = now - m_timeOfBufferingMonitor;
1901 double rateSinceLastMonitor = m_bufferedSinceLastMonitor / interval;
1903 m_timeOfBufferingMonitor = now;
1904 m_bufferedSinceLastMonitor = 0;
1906 m_averageBufferRate += (interval * ExponentialMovingAverageCoefficient) * (rateSinceLastMonitor - m_averageBufferRate);
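// NOTE (worked example with assumed values): with ExponentialMovingAverageCoefficient == 0.1,
// a 1s monitoring interval, a previous average of 1.0 buffered-seconds/second and an
// instantaneous rate of 2.0, the average becomes 1.0 + (1 * 0.1) * (2.0 - 1.0) == 1.1, so
// short bursts only nudge the long-term estimate.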
1908 LOG(MediaSource, "SourceBuffer::monitorBufferingRate(%p) - m_avegareBufferRate: %lf", this, m_averageBufferRate);
void SourceBuffer::updateBufferedFromTrackBuffers()
{
1913 // 3.1 Attributes, buffered
1914 // https://rawgit.com/w3c/media-source/45627646344eea0170dd1cbc5a3d508ca751abb8/media-source-respec.html#dom-sourcebuffer-buffered
1916 // 2. Let highest end time be the largest track buffer ranges end time across all the track buffers managed by this SourceBuffer object.
1917 MediaTime highestEndTime = MediaTime::negativeInfiniteTime();
1918 for (auto& trackBuffer : m_trackBufferMap.values()) {
if (!trackBuffer.buffered.length())
    continue;
    highestEndTime = std::max(highestEndTime, trackBuffer.buffered.maximumBufferedTime());
}
1924 // NOTE: Short circuit the following if none of the TrackBuffers have buffered ranges to avoid generating
1925 // a single range of {0, 0}.
1926 if (highestEndTime.isNegativeInfinite()) {
    m_buffered->ranges() = PlatformTimeRanges();
    return;
}
1931 // 3. Let intersection ranges equal a TimeRange object containing a single range from 0 to highest end time.
1932 PlatformTimeRanges intersectionRanges { MediaTime::zeroTime(), highestEndTime };
1934 // 4. For each audio and video track buffer managed by this SourceBuffer, run the following steps:
1935 for (auto& trackBuffer : m_trackBufferMap.values()) {
1936 // 4.1 Let track ranges equal the track buffer ranges for the current track buffer.
1937 PlatformTimeRanges trackRanges = trackBuffer.buffered;
1938 // 4.2 If readyState is "ended", then set the end time on the last range in track ranges to highest end time.
1939 if (m_source->isEnded())
1940 trackRanges.add(trackRanges.maximumBufferedTime(), highestEndTime);
1942 // 4.3 Let new intersection ranges equal the intersection between the intersection ranges and the track ranges.
1943 // 4.4 Replace the ranges in intersection ranges with the new intersection ranges.
    intersectionRanges.intersectWith(trackRanges);
}
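// NOTE (worked example with assumed values): with a video track buffered over [0s, 10s) and an
// audio track buffered over [0s, 8s), the reported buffered attribute is the intersection
// [0s, 8s): only ranges that every track can actually play are exposed.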
1947 // 5. If intersection ranges does not contain the exact same range information as the current value of this attribute,
1948 // then update the current value of this attribute to intersection ranges.
1949 m_buffered->ranges() = intersectionRanges;
setBufferedDirty(true);
}
bool SourceBuffer::canPlayThroughRange(PlatformTimeRanges& ranges)
{
    if (isRemoved())
        return false;
1958 monitorBufferingRate();
1960 // Assuming no fluctuations in the buffering rate, loading 1 second per second or greater
1961 // means indefinite playback. This could be improved by taking jitter into account.
if (m_averageBufferRate > 1)
    return true;
1965 // Add up all the time yet to be buffered.
1966 MediaTime currentTime = m_source->currentTime();
1967 MediaTime duration = m_source->duration();
1969 PlatformTimeRanges unbufferedRanges = ranges;
1970 unbufferedRanges.invert();
1971 unbufferedRanges.intersectWith(PlatformTimeRanges(currentTime, std::max(currentTime, duration)));
1972 MediaTime unbufferedTime = unbufferedRanges.totalDuration();
if (!unbufferedTime.isValid())
    return false;
1976 MediaTime timeRemaining = duration - currentTime;
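// NOTE (worked example with assumed values): at currentTime 20s with duration 120s, 30s of
// the remaining window unbuffered and an average buffer rate of 0.5, the estimated download
// time is 30 / 0.5 == 60s against 100s of remaining playback, so the check below passes.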
return unbufferedTime.toDouble() / m_averageBufferRate < timeRemaining.toDouble();
}
size_t SourceBuffer::extraMemoryCost() const
{
1982 size_t extraMemoryCost = m_pendingAppendData.capacity();
1983 for (auto& trackBuffer : m_trackBufferMap.values())
1984 extraMemoryCost += trackBuffer.samples.sizeInBytes();
return extraMemoryCost;
}
void SourceBuffer::reportExtraMemoryAllocated()
{
1991 size_t extraMemoryCost = this->extraMemoryCost();
if (extraMemoryCost <= m_reportedExtraMemoryCost)
    return;
1995 size_t extraMemoryCostDelta = extraMemoryCost - m_reportedExtraMemoryCost;
1996 m_reportedExtraMemoryCost = extraMemoryCost;
1998 JSC::JSLockHolder lock(scriptExecutionContext()->vm());
1999 // FIXME: Adopt reportExtraMemoryVisited, and switch to reportExtraMemoryAllocated.
2000 // https://bugs.webkit.org/show_bug.cgi?id=142595
scriptExecutionContext()->vm().heap.deprecatedReportExtraMemory(extraMemoryCostDelta);
}
Vector<String> SourceBuffer::bufferedSamplesForTrackID(const AtomicString& trackID)
{
2006 auto it = m_trackBufferMap.find(trackID);
2007 if (it == m_trackBufferMap.end())
2008 return Vector<String>();
2010 TrackBuffer& trackBuffer = it->value;
2011 Vector<String> sampleDescriptions;
2012 for (auto& pair : trackBuffer.samples.decodeOrder())
2013 sampleDescriptions.append(toString(*pair.second));
return sampleDescriptions;
}
Vector<String> SourceBuffer::enqueuedSamplesForTrackID(const AtomicString& trackID)
{
    return m_private->enqueuedSamplesForTrackID(trackID);
}
Document& SourceBuffer::document() const
{
    ASSERT(scriptExecutionContext());
    return downcast<Document>(*scriptExecutionContext());
}
ExceptionOr<void> SourceBuffer::setMode(AppendMode newMode)
{
2031 // 3.1 Attributes - mode
2032 // http://www.w3.org/TR/media-source/#widl-SourceBuffer-mode
2034 // On setting, run the following steps:
2036 // 1. Let new mode equal the new value being assigned to this attribute.
2037 // 2. If generate timestamps flag equals true and new mode equals "segments", then throw an INVALID_ACCESS_ERR exception and abort these steps.
2038 if (m_shouldGenerateTimestamps && newMode == AppendMode::Segments)
2039 return Exception { INVALID_ACCESS_ERR };
2041 // 3. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an INVALID_STATE_ERR exception and abort these steps.
2042 // 4. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
2043 if (isRemoved() || m_updating)
2044 return Exception { INVALID_STATE_ERR };
2046 // 5. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
2047 if (m_source->isEnded()) {
2048 // 5.1. Set the readyState attribute of the parent media source to "open"
2049 // 5.2. Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();
}
2053 // 6. If the append state equals PARSING_MEDIA_SEGMENT, then throw an INVALID_STATE_ERR and abort these steps.
2054 if (m_appendState == ParsingMediaSegment)
2055 return Exception { INVALID_STATE_ERR };
2057 // 7. If the new mode equals "sequence", then set the group start timestamp to the group end timestamp.
2058 if (newMode == AppendMode::Sequence)
2059 m_groupStartTimestamp = m_groupEndTimestamp;
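// NOTE (illustrative, JavaScript usage): after `sourceBuffer.mode = 'sequence'` the next
// appended media segment is placed at the current group end timestamp, so segments play
// back-to-back regardless of the timestamps muxed into the container.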
// 8. Update the attribute to new mode.
m_mode = newMode;

return { };
}

} // namespace WebCore

#endif