Remove the Timer parameters from timer callbacks
Source/WebCore/Modules/mediasource/SourceBuffer.cpp
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 * Copyright (C) 2013-2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "SourceBuffer.h"

#if ENABLE(MEDIA_SOURCE)

#include "AudioTrackList.h"
#include "Event.h"
#include "ExceptionCodePlaceholder.h"
#include "GenericEventQueue.h"
#include "HTMLMediaElement.h"
#include "InbandTextTrack.h"
#include "Logging.h"
#include "MediaDescription.h"
#include "MediaSample.h"
#include "MediaSource.h"
#include "SampleMap.h"
#include "SourceBufferPrivate.h"
#include "TextTrackList.h"
#include "TimeRanges.h"
#include "VideoTrackList.h"
#include <limits>
#include <map>
#include <runtime/JSCInlines.h>
#include <runtime/JSLock.h>
#include <runtime/VM.h>
#include <wtf/CurrentTime.h>
#include <wtf/NeverDestroyed.h>
#if !LOG_DISABLED
#include <wtf/text/StringBuilder.h>
#endif

namespace WebCore {

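// Smoothing factor for the exponential moving average of the observed append rate
// (m_averageBufferRate). Presumably applied, each monitoring interval, as
//     average = coefficient * currentRate + (1 - coefficient) * average;
// the monitoring code itself falls outside this excerpt.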
static double ExponentialMovingAverageCoefficient = 0.1;

// Allow hasCurrentTime() to be off by as much as the length of a 24fps video frame
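// (i.e. 1/24 of a second, roughly 41.7 ms).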
static const MediaTime& currentTimeFudgeFactor()
{
    static NeverDestroyed<MediaTime> fudgeFactor(1, 24);
    return fudgeFactor;
}

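// Per-track bookkeeping: 'samples' indexes every buffered frame by both decode and
// presentation order, while 'decodeQueue' appears to hold only the frames still awaiting
// delivery to the SourceBufferPrivate, in decode order.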
struct SourceBuffer::TrackBuffer {
    MediaTime lastDecodeTimestamp;
    MediaTime lastFrameDuration;
    MediaTime highestPresentationTimestamp;
    MediaTime lastEnqueuedPresentationTime;
    MediaTime lastEnqueuedDecodeEndTime;
    bool needRandomAccessFlag;
    bool enabled;
    bool needsReenqueueing;
    SampleMap samples;
    DecodeOrderSampleMap::MapType decodeQueue;
    RefPtr<MediaDescription> description;

    TrackBuffer()
        : lastDecodeTimestamp(MediaTime::invalidTime())
        , lastFrameDuration(MediaTime::invalidTime())
        , highestPresentationTimestamp(MediaTime::invalidTime())
        , lastEnqueuedPresentationTime(MediaTime::invalidTime())
        , lastEnqueuedDecodeEndTime(MediaTime::invalidTime())
        , needRandomAccessFlag(true)
        , enabled(false)
        , needsReenqueueing(false)
    {
    }
};

PassRef<SourceBuffer> SourceBuffer::create(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
{
    RefPtr<SourceBuffer> sourceBuffer(adoptRef(new SourceBuffer(WTF::move(sourceBufferPrivate), source)));
    sourceBuffer->suspendIfNeeded();
    return sourceBuffer.releaseNonNull();
}

SourceBuffer::SourceBuffer(PassRef<SourceBufferPrivate> sourceBufferPrivate, MediaSource* source)
    : ActiveDOMObject(source->scriptExecutionContext())
    , m_private(WTF::move(sourceBufferPrivate))
    , m_source(source)
    , m_asyncEventQueue(*this)
    , m_appendBufferTimer(*this, &SourceBuffer::appendBufferTimerFired)
    , m_highestPresentationEndTimestamp(MediaTime::invalidTime())
    , m_buffered(TimeRanges::create())
    , m_appendState(WaitingForSegment)
    , m_timeOfBufferingMonitor(monotonicallyIncreasingTime())
    , m_bufferedSinceLastMonitor(0)
    , m_averageBufferRate(0)
    , m_reportedExtraMemoryCost(0)
    , m_pendingRemoveStart(MediaTime::invalidTime())
    , m_pendingRemoveEnd(MediaTime::invalidTime())
    , m_removeTimer(*this, &SourceBuffer::removeTimerFired)
    , m_updating(false)
    , m_receivedFirstInitializationSegment(false)
    , m_active(false)
    , m_bufferFull(false)
{
    ASSERT(m_source);

    m_private->setClient(this);
}

SourceBuffer::~SourceBuffer()
{
    ASSERT(isRemoved());

    m_private->setClient(0);
}

PassRefPtr<TimeRanges> SourceBuffer::buffered(ExceptionCode& ec) const
{
    // Section 3.1 buffered attribute steps.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
    //    INVALID_STATE_ERR exception and abort these steps.
    if (isRemoved()) {
        ec = INVALID_STATE_ERR;
        return nullptr;
    }

    // 2. Return a new static normalized TimeRanges object for the media segments buffered.
    return m_buffered->copy();
}

const RefPtr<TimeRanges>& SourceBuffer::buffered() const
{
    return m_buffered;
}

double SourceBuffer::timestampOffset() const
{
    return m_timestampOffset.toDouble();
}

void SourceBuffer::setTimestampOffset(double offset, ExceptionCode& ec)
{
    // Section 3.1 timestampOffset attribute setter steps.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#attributes-1
    // 1. Let new timestamp offset equal the new value being assigned to this attribute.
    // 2. If this object has been removed from the sourceBuffers attribute of the parent media source, then throw an
    //    INVALID_STATE_ERR exception and abort these steps.
    // 3. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
    if (isRemoved() || m_updating) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // 4. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 4.1 Set the readyState attribute of the parent media source to "open"
    // 4.2 Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 5. If the append state equals PARSING_MEDIA_SEGMENT, then throw an INVALID_STATE_ERR and abort these steps.
    if (m_appendState == ParsingMediaSegment) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // FIXME: Add step 6 text when mode attribute is implemented.
    // 7. Update the attribute to the new value.
    m_timestampOffset = MediaTime::createWithDouble(offset);
}

void SourceBuffer::appendBuffer(PassRefPtr<ArrayBuffer> data, ExceptionCode& ec)
{
    // Section 3.2 appendBuffer()
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
    // 1. If data is null then throw an INVALID_ACCESS_ERR exception and abort these steps.
    if (!data) {
        ec = INVALID_ACCESS_ERR;
        return;
    }

    appendBufferInternal(static_cast<unsigned char*>(data->data()), data->byteLength(), ec);
}

void SourceBuffer::appendBuffer(PassRefPtr<ArrayBufferView> data, ExceptionCode& ec)
{
    // Section 3.2 appendBuffer()
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data
    // 1. If data is null then throw an INVALID_ACCESS_ERR exception and abort these steps.
    if (!data) {
        ec = INVALID_ACCESS_ERR;
        return;
    }

    appendBufferInternal(static_cast<unsigned char*>(data->baseAddress()), data->byteLength(), ec);
}

void SourceBuffer::abort(ExceptionCode& ec)
{
    // Section 3.2 abort() method steps.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-abort-void
    // 1. If this object has been removed from the sourceBuffers attribute of the parent media source
    //    then throw an INVALID_STATE_ERR exception and abort these steps.
    // 2. If the readyState attribute of the parent media source is not in the "open" state
    //    then throw an INVALID_STATE_ERR exception and abort these steps.
    if (isRemoved() || !m_source->isOpen()) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // 3. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
    abortIfUpdating();

    // 4. Run the reset parser state algorithm.
    m_private->abort();

    // FIXME(229408) Add steps 5-6 update appendWindowStart & appendWindowEnd.
}

void SourceBuffer::remove(double start, double end, ExceptionCode& ec)
{
    remove(MediaTime::createWithDouble(start), MediaTime::createWithDouble(end), ec);
}

void SourceBuffer::remove(const MediaTime& start, const MediaTime& end, ExceptionCode& ec)
{
    LOG(MediaSource, "SourceBuffer::remove(%p) - start(%lf), end(%lf)", this, start.toDouble(), end.toDouble());

    // Section 3.2 remove() method steps.
    // 1. If start is negative or greater than duration, then throw an InvalidAccessError exception and abort these steps.
    // 2. If end is less than or equal to start, then throw an InvalidAccessError exception and abort these steps.
    if (start < MediaTime::zeroTime() || (m_source && (!m_source->duration().isValid() || start > m_source->duration())) || end <= start) {
        ec = INVALID_ACCESS_ERR;
        return;
    }

    // 3. If this object has been removed from the sourceBuffers attribute of the parent media source then throw an
    //    InvalidStateError exception and abort these steps.
    // 4. If the updating attribute equals true, then throw an InvalidStateError exception and abort these steps.
    if (isRemoved() || m_updating) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // 5. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 5.1. Set the readyState attribute of the parent media source to "open"
    // 5.2. Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 6. Set the updating attribute to true.
    m_updating = true;

    // 7. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
    scheduleEvent(eventNames().updatestartEvent);

    // 8. Return control to the caller and run the rest of the steps asynchronously.
    m_pendingRemoveStart = start;
    m_pendingRemoveEnd = end;
    m_removeTimer.startOneShot(0);
}

void SourceBuffer::abortIfUpdating()
{
    // Section 3.2 abort() method step 3 substeps.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-abort-void

    if (!m_updating)
        return;

    // 3.1. Abort the buffer append and stream append loop algorithms if they are running.
    m_appendBufferTimer.stop();
    m_pendingAppendData.clear();

    m_removeTimer.stop();
    m_pendingRemoveStart = MediaTime::invalidTime();
    m_pendingRemoveEnd = MediaTime::invalidTime();

    // 3.2. Set the updating attribute to false.
    m_updating = false;

    // 3.3. Queue a task to fire a simple event named abort at this SourceBuffer object.
    scheduleEvent(eventNames().abortEvent);

    // 3.4. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);
}

void SourceBuffer::removedFromMediaSource()
{
    if (isRemoved())
        return;

    abortIfUpdating();

    for (auto& trackBufferPair : m_trackBufferMap.values()) {
        trackBufferPair.samples.clear();
        trackBufferPair.decodeQueue.clear();
    }

    m_private->removedFromMediaSource();
    m_source = 0;
}

void SourceBuffer::seekToTime(const MediaTime& time)
{
    LOG(MediaSource, "SourceBuffer::seekToTime(%p) - time(%s)", this, toString(time).utf8().data());

    for (auto& trackBufferPair : m_trackBufferMap) {
        TrackBuffer& trackBuffer = trackBufferPair.value;
        const AtomicString& trackID = trackBufferPair.key;

        trackBuffer.needsReenqueueing = true;
        reenqueueMediaForTime(trackBuffer, trackID, time);
    }
}

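// Illustrative walk-through (numbers invented): with targetTime = 10 s, a negative
// threshold of 2 s and a positive threshold of 3 s, a track whose nearest sync samples
// sit at 8.5 s and 12.5 s yields trackSeekTime = 8.5 s, since |10 - 8.5| < |10 - 12.5|.
// Across tracks, the loop keeps whichever per-track candidate lies furthest from the
// target, i.e. the worst case over all tracks.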
MediaTime SourceBuffer::sourceBufferPrivateFastSeekTimeForMediaTime(SourceBufferPrivate*, const MediaTime& targetTime, const MediaTime& negativeThreshold, const MediaTime& positiveThreshold)
{
    MediaTime seekTime = targetTime;
    MediaTime lowerBoundTime = targetTime - negativeThreshold;
    MediaTime upperBoundTime = targetTime + positiveThreshold;

    for (auto& trackBuffer : m_trackBufferMap.values()) {
        // Find the sample which contains the target time.
        auto futureSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(targetTime, positiveThreshold);
        auto pastSyncSampleIterator = trackBuffer.samples.decodeOrder().findSyncSamplePriorToPresentationTime(targetTime, negativeThreshold);
        auto upperBound = trackBuffer.samples.decodeOrder().end();
        auto lowerBound = trackBuffer.samples.decodeOrder().rend();

        if (futureSyncSampleIterator == upperBound && pastSyncSampleIterator == lowerBound)
            continue;

        MediaTime futureSeekTime = MediaTime::positiveInfiniteTime();
        if (futureSyncSampleIterator != upperBound) {
            RefPtr<MediaSample>& sample = futureSyncSampleIterator->second;
            futureSeekTime = sample->presentationTime();
        }

        MediaTime pastSeekTime = MediaTime::negativeInfiniteTime();
        if (pastSyncSampleIterator != lowerBound) {
            RefPtr<MediaSample>& sample = pastSyncSampleIterator->second;
            pastSeekTime = sample->presentationTime();
        }

        MediaTime trackSeekTime = abs(targetTime - futureSeekTime) < abs(targetTime - pastSeekTime) ? futureSeekTime : pastSeekTime;
        if (abs(targetTime - trackSeekTime) > abs(targetTime - seekTime))
            seekTime = trackSeekTime;
    }

    return seekTime;
}

bool SourceBuffer::hasPendingActivity() const
{
    return m_source || m_asyncEventQueue.hasPendingEvents();
}

void SourceBuffer::stop()
{
    m_appendBufferTimer.stop();
    m_removeTimer.stop();
}

bool SourceBuffer::isRemoved() const
{
    return !m_source;
}

void SourceBuffer::scheduleEvent(const AtomicString& eventName)
{
    RefPtr<Event> event = Event::create(eventName, false, false);
    event->setTarget(this);

    m_asyncEventQueue.enqueueEvent(event.release());
}

void SourceBuffer::appendBufferInternal(unsigned char* data, unsigned size, ExceptionCode& ec)
{
    // Section 3.2 appendBuffer()
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-appendBuffer-void-ArrayBufferView-data

    // Step 1 is enforced by the caller.
    // 2. Run the prepare append algorithm.
    // Section 3.5.4 Prepare Append Algorithm

    // 1. If the SourceBuffer has been removed from the sourceBuffers attribute of the parent media source
    // then throw an INVALID_STATE_ERR exception and abort these steps.
    // 2. If the updating attribute equals true, then throw an INVALID_STATE_ERR exception and abort these steps.
    if (isRemoved() || m_updating) {
        ec = INVALID_STATE_ERR;
        return;
    }

    // 3. If the readyState attribute of the parent media source is in the "ended" state then run the following steps:
    // 3.1. Set the readyState attribute of the parent media source to "open"
    // 3.2. Queue a task to fire a simple event named sourceopen at the parent media source.
    m_source->openIfInEndedState();

    // 4. Run the coded frame eviction algorithm.
    evictCodedFrames(size);

    // FIXME: enable this code when MSE libraries have been updated to support it.
#if 0
    // 5. If the buffer full flag equals true, then throw a QUOTA_EXCEEDED_ERR exception and abort these steps.
    if (m_bufferFull) {
        LOG(MediaSource, "SourceBuffer::appendBufferInternal(%p) -  buffer full, failing with QUOTA_EXCEEDED_ERR error", this);
        ec = QUOTA_EXCEEDED_ERR;
        return;
    }
#endif

    // NOTE: Return to 3.2 appendBuffer()
    // 3. Add data to the end of the input buffer.
    m_pendingAppendData.append(data, size);

    // 4. Set the updating attribute to true.
    m_updating = true;

    // 5. Queue a task to fire a simple event named updatestart at this SourceBuffer object.
    scheduleEvent(eventNames().updatestartEvent);

    // 6. Asynchronously run the buffer append algorithm.
    m_appendBufferTimer.startOneShot(0);

    reportExtraMemoryCost();
}

void SourceBuffer::appendBufferTimerFired()
{
    if (isRemoved())
        return;

    ASSERT(m_updating);

    // Section 3.5.5 Buffer Append Algorithm
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append

    // 1. Run the segment parser loop algorithm.
    size_t appendSize = m_pendingAppendData.size();
    if (!appendSize) {
        // Resize buffer for 0 byte appends so we always have a valid pointer.
        // We need to convey all appends, even 0 byte ones, to |m_private| so
        // that it can clear its end of stream state if necessary.
        m_pendingAppendData.resize(1);
    }

    // Section 3.5.1 Segment Parser Loop
    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-segment-parser-loop
    // When the segment parser loop algorithm is invoked, run the following steps:

    // 1. Loop Top: If the input buffer is empty, then jump to the need more data step below.
    if (!m_pendingAppendData.size()) {
        sourceBufferPrivateAppendComplete(&m_private.get(), AppendSucceeded);
        return;
    }

    m_private->append(m_pendingAppendData.data(), appendSize);
    m_pendingAppendData.clear();
}

void SourceBuffer::sourceBufferPrivateAppendComplete(SourceBufferPrivate*, AppendResult result)
{
    if (isRemoved())
        return;

    // Section 3.5.5 Buffer Append Algorithm, ctd.
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-buffer-append

    // 2. If the input buffer contains bytes that violate the SourceBuffer byte stream format specification,
    // then run the end of stream algorithm with the error parameter set to "decode" and abort this algorithm.
    if (result == ParsingFailed) {
        LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - result = ParsingFailed", this);
        m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
        return;
    }

    // NOTE: Steps 3 - 6 enforced by sourceBufferPrivateDidReceiveInitializationSegment() and
    // sourceBufferPrivateDidReceiveSample below.

    // 7. Need more data: Return control to the calling algorithm.

    // NOTE: return to Section 3.5.5
    // 2. If the segment parser loop algorithm in the previous step was aborted, then abort this algorithm.
    if (result != AppendSucceeded)
        return;

    // 3. Set the updating attribute to false.
    m_updating = false;

    // 4. Queue a task to fire a simple event named update at this SourceBuffer object.
    scheduleEvent(eventNames().updateEvent);

    // 5. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);

    if (m_source)
        m_source->monitorSourceBuffers();

    // monitorSourceBuffers() can detach this SourceBuffer from its MediaSource, so re-check
    // before dereferencing m_source below.
    if (isRemoved())
        return;

    MediaTime currentMediaTime = m_source->currentTime();
    for (auto& trackBufferPair : m_trackBufferMap) {
        TrackBuffer& trackBuffer = trackBufferPair.value;
        const AtomicString& trackID = trackBufferPair.key;

        if (trackBuffer.needsReenqueueing) {
            LOG(MediaSource, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - reenqueuing at time (%s)", this, toString(currentMediaTime).utf8().data());
            reenqueueMediaForTime(trackBuffer, trackID, currentMediaTime);
        } else
            provideMediaData(trackBuffer, trackID);
    }

    reportExtraMemoryCost();
    if (extraMemoryCost() > this->maximumBufferSize())
        m_bufferFull = true;

    LOG(Media, "SourceBuffer::sourceBufferPrivateAppendComplete(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
}

void SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(SourceBufferPrivate*, int error)
{
#if LOG_DISABLED
    UNUSED_PARAM(error);
#endif

    LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveRenderingError(%p) - result = %i", this, error);

    if (!isRemoved())
        m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
}

static bool decodeTimeComparator(const PresentationOrderSampleMap::MapType::value_type& a, const PresentationOrderSampleMap::MapType::value_type& b)
{
    return a.second->decodeTime() < b.second->decodeTime();
}

static PassRefPtr<TimeRanges> removeSamplesFromTrackBuffer(const DecodeOrderSampleMap::MapType& samples, SourceBuffer::TrackBuffer& trackBuffer, const SourceBuffer* buffer, const char* logPrefix)
{
#if !LOG_DISABLED
    double earliestSample = std::numeric_limits<double>::infinity();
    double latestSample = 0;
    size_t bytesRemoved = 0;
#else
    UNUSED_PARAM(logPrefix);
    UNUSED_PARAM(buffer);
#endif

    RefPtr<TimeRanges> erasedRanges = TimeRanges::create();
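    // The one-microsecond pad below extends each erased sample's range just past its end,
    // presumably so that back-to-back samples coalesce into a single contiguous erased
    // range once converted to doubles.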
    MediaTime microsecond(1, 1000000);
    for (auto sampleIt : samples) {
        const DecodeOrderSampleMap::KeyType& decodeKey = sampleIt.first;
#if !LOG_DISABLED
        size_t startBufferSize = trackBuffer.samples.sizeInBytes();
#endif

        RefPtr<MediaSample>& sample = sampleIt.second;
        LOG(MediaSource, "SourceBuffer::%s(%p) - removing sample(%s)", logPrefix, buffer, toString(*sampleIt.second).utf8().data());

        // Remove the erased samples from the TrackBuffer sample map.
        trackBuffer.samples.removeSample(sample.get());

        // Also remove the erased samples from the TrackBuffer decodeQueue.
        trackBuffer.decodeQueue.erase(decodeKey);

        double startTime = sample->presentationTime().toDouble();
        double endTime = startTime + (sample->duration() + microsecond).toDouble();
        erasedRanges->add(startTime, endTime);

#if !LOG_DISABLED
        bytesRemoved += startBufferSize - trackBuffer.samples.sizeInBytes();
        if (startTime < earliestSample)
            earliestSample = startTime;
        if (endTime > latestSample)
            latestSample = endTime;
#endif
    }

#if !LOG_DISABLED
    if (bytesRemoved)
        LOG(MediaSource, "SourceBuffer::%s(%p) removed %zu bytes, start(%lf), end(%lf)", logPrefix, buffer, bytesRemoved, earliestSample, latestSample);
#endif

    return erasedRanges.release();
}

void SourceBuffer::removeCodedFrames(const MediaTime& start, const MediaTime& end)
{
    LOG(MediaSource, "SourceBuffer::removeCodedFrames(%p) - start(%s), end(%s)", this, toString(start).utf8().data(), toString(end).utf8().data());

    // 3.5.9 Coded Frame Removal Algorithm
    // https://dvcs.w3.org/hg/html-media/raw-file/tip/media-source/media-source.html#sourcebuffer-coded-frame-removal

    // 1. Let start be the starting presentation timestamp for the removal range.
    MediaTime durationMediaTime = m_source->duration();
    MediaTime currentMediaTime = m_source->currentTime();

    // 2. Let end be the end presentation timestamp for the removal range.
    // 3. For each track buffer in this source buffer, run the following steps:
    for (auto& iter : m_trackBufferMap) {
        TrackBuffer& trackBuffer = iter.value;

        // 3.1. Let remove end timestamp be the current value of duration
        // 3.2 If this track buffer has a random access point timestamp that is greater than or equal to end, then update
        // remove end timestamp to that random access point timestamp.
        // NOTE: findSyncSampleAfterPresentationTime will return the next sync sample on or after the presentation time
        // or decodeOrder().end() if no sync sample exists after that presentation time.
        DecodeOrderSampleMap::iterator removeDecodeEnd = trackBuffer.samples.decodeOrder().findSyncSampleAfterPresentationTime(end);
        PresentationOrderSampleMap::iterator removePresentationEnd;
        if (removeDecodeEnd == trackBuffer.samples.decodeOrder().end())
            removePresentationEnd = trackBuffer.samples.presentationOrder().end();
        else
            removePresentationEnd = trackBuffer.samples.presentationOrder().findSampleWithPresentationTime(removeDecodeEnd->second->presentationTime());

        PresentationOrderSampleMap::iterator removePresentationStart = trackBuffer.samples.presentationOrder().findSampleOnOrAfterPresentationTime(start);
        if (removePresentationStart == removePresentationEnd)
            continue;

        // 3.3 Remove all media data, from this track buffer, that contain starting timestamps greater than or equal to
        // start and less than the remove end timestamp.
        // NOTE: frames must be removed in decode order, so that all dependent frames between the frame to be removed
        // and the next sync sample frame are removed. But we must start from the first sample in decode order, not
        // presentation order.
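        // e.g. (illustrative) frames presented as I B B P are typically decoded as I P B B;
        // a presentation range covering the B and P frames has the P frame as its earliest
        // decode timestamp, so removal must begin at P's decode position even though it is
        // presented last.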
        PresentationOrderSampleMap::iterator minDecodeTimeIter = std::min_element(removePresentationStart, removePresentationEnd, decodeTimeComparator);
        DecodeOrderSampleMap::KeyType decodeKey(minDecodeTimeIter->second->decodeTime(), minDecodeTimeIter->second->presentationTime());
        DecodeOrderSampleMap::iterator removeDecodeStart = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);

        DecodeOrderSampleMap::MapType erasedSamples(removeDecodeStart, removeDecodeEnd);
        RefPtr<TimeRanges> erasedRanges = removeSamplesFromTrackBuffer(erasedSamples, trackBuffer, this, "removeCodedFrames");

        // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
        // not yet displayed samples.
        if (currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
            PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
            possiblyEnqueuedRanges.intersectWith(erasedRanges->ranges());
            if (possiblyEnqueuedRanges.length())
                trackBuffer.needsReenqueueing = true;
        }

        erasedRanges->invert();
        m_buffered->intersectWith(*erasedRanges);

        // 3.4 If this object is in activeSourceBuffers, the current playback position is greater than or equal to start
        // and less than the remove end timestamp, and HTMLMediaElement.readyState is greater than HAVE_METADATA, then set
        // the HTMLMediaElement.readyState attribute to HAVE_METADATA and stall playback.
        if (m_active && currentMediaTime >= start && currentMediaTime < end && m_private->readyState() > MediaPlayer::HaveMetadata)
            m_private->setReadyState(MediaPlayer::HaveMetadata);
    }

    // 4. If buffer full flag equals true and this object is ready to accept more bytes, then set the buffer full flag to false.
    // No-op

    LOG(Media, "SourceBuffer::removeCodedFrames(%p) - buffered = %s", this, toString(m_buffered->ranges()).utf8().data());
}

void SourceBuffer::removeTimerFired()
{
    ASSERT(m_updating);
    ASSERT(m_pendingRemoveStart.isValid());
    ASSERT(m_pendingRemoveStart < m_pendingRemoveEnd);

    // Section 3.2 remove() method steps
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#widl-SourceBuffer-remove-void-double-start-double-end

    // 9. Run the coded frame removal algorithm with start and end as the start and end of the removal range.
    removeCodedFrames(m_pendingRemoveStart, m_pendingRemoveEnd);

    // 10. Set the updating attribute to false.
    m_updating = false;
    m_pendingRemoveStart = MediaTime::invalidTime();
    m_pendingRemoveEnd = MediaTime::invalidTime();

    // 11. Queue a task to fire a simple event named update at this SourceBuffer object.
    scheduleEvent(eventNames().updateEvent);

    // 12. Queue a task to fire a simple event named updateend at this SourceBuffer object.
    scheduleEvent(eventNames().updateendEvent);
}

void SourceBuffer::evictCodedFrames(size_t newDataSize)
{
    // 3.5.13 Coded Frame Eviction Algorithm
    // http://www.w3.org/TR/media-source/#sourcebuffer-coded-frame-eviction

    if (isRemoved())
        return;

    // This algorithm is run to free up space in this source buffer when new data is appended.
    // 1. Let new data equal the data that is about to be appended to this SourceBuffer.
    // 2. If the buffer full flag equals false, then abort these steps.
    if (!m_bufferFull)
        return;

    size_t maximumBufferSize = this->maximumBufferSize();

    // 3. Let removal ranges equal a list of presentation time ranges that can be evicted from
    // the presentation to make room for the new data.

    // NOTE: begin by removing data from the beginning of the buffered ranges, 30 seconds at
    // a time, up to 30 seconds before currentTime.
    MediaTime thirtySeconds = MediaTime(30, 1);
    MediaTime currentTime = m_source->currentTime();
    MediaTime maximumRangeEnd = currentTime - thirtySeconds;
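    // e.g. (illustrative) with currentTime at 120 s, the sweep below removes [0,30),
    // [30,60) and [60,90), re-checking the memory cost after each pass and stopping as
    // soon as enough space is free; it never reaches past 90 s (currentTime - 30 s).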

#if !LOG_DISABLED
    LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - currentTime = %lf, require %zu bytes, maximum buffer size is %zu", this, m_source->currentTime().toDouble(), extraMemoryCost() + newDataSize, maximumBufferSize);
    size_t initialBufferedSize = extraMemoryCost();
#endif

    MediaTime rangeStart = MediaTime::zeroTime();
    MediaTime rangeEnd = rangeStart + thirtySeconds;
    while (rangeStart < maximumRangeEnd) {
        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
        // end equal to the removal range start and end timestamp respectively.
        removeCodedFrames(rangeStart, std::min(rangeEnd, maximumRangeEnd));
        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
            m_bufferFull = false;
            break;
        }

        rangeStart += thirtySeconds;
        rangeEnd += thirtySeconds;
    }

    if (!m_bufferFull) {
        LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes", this, initialBufferedSize - extraMemoryCost());
        return;
    }

    // If there still isn't enough free space and there are buffers in time ranges after the current range (i.e. there is
    // a gap after the current buffered range), delete 30 seconds at a time from duration back to the current time range
    // or 30 seconds after currentTime, whichever we hit first.
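    // e.g. (illustrative) with duration = 300 s, currentTime = 100 s and a buffered range
    // after a gap, the sweep removes [270,300), then [240,270), and so on, never dipping
    // below currentTime + 30 s and never touching the range containing currentTime.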
    auto buffered = m_buffered->ranges();
    size_t currentTimeRange = buffered.find(currentTime);
    if (currentTimeRange == notFound || currentTimeRange == buffered.length() - 1) {
        LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes but FAILED to free enough", this, initialBufferedSize - extraMemoryCost());
        return;
    }

    MediaTime minimumRangeStart = currentTime + thirtySeconds;

    rangeEnd = m_source->duration();
    rangeStart = rangeEnd - thirtySeconds;
    while (rangeStart > minimumRangeStart) {

        // Do not evict data from the time range that contains currentTime.
        size_t startTimeRange = buffered.find(rangeStart);
        if (startTimeRange == currentTimeRange) {
            size_t endTimeRange = buffered.find(rangeEnd);
            if (endTimeRange == currentTimeRange)
                break;

            rangeEnd = buffered.start(endTimeRange);
        }

        // 4. For each range in removal ranges, run the coded frame removal algorithm with start and
        // end equal to the removal range start and end timestamp respectively.
        removeCodedFrames(std::max(minimumRangeStart, rangeStart), rangeEnd);
        if (extraMemoryCost() + newDataSize < maximumBufferSize) {
            m_bufferFull = false;
            break;
        }

        rangeStart -= thirtySeconds;
        rangeEnd -= thirtySeconds;
    }

    LOG(MediaSource, "SourceBuffer::evictCodedFrames(%p) - evicted %zu bytes%s", this, initialBufferedSize - extraMemoryCost(), m_bufferFull ? "" : " but FAILED to free enough");
}

size_t SourceBuffer::maximumBufferSize() const
{
    if (isRemoved())
        return 0;

    HTMLMediaElement* element = m_source->mediaElement();
    if (!element)
        return 0;

    return element->maximumSourceBufferSize(*this);
}

const AtomicString& SourceBuffer::decodeError()
{
    static NeverDestroyed<AtomicString> decode("decode", AtomicString::ConstructFromLiteral);
    return decode;
}

const AtomicString& SourceBuffer::networkError()
{
    static NeverDestroyed<AtomicString> network("network", AtomicString::ConstructFromLiteral);
    return network;
}

VideoTrackList* SourceBuffer::videoTracks()
{
    if (!m_source || !m_source->mediaElement())
        return nullptr;

    if (!m_videoTracks)
        m_videoTracks = VideoTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());

    return m_videoTracks.get();
}

AudioTrackList* SourceBuffer::audioTracks()
{
    if (!m_source || !m_source->mediaElement())
        return nullptr;

    if (!m_audioTracks)
        m_audioTracks = AudioTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());

    return m_audioTracks.get();
}

TextTrackList* SourceBuffer::textTracks()
{
    if (!m_source || !m_source->mediaElement())
        return nullptr;

    if (!m_textTracks)
        m_textTracks = TextTrackList::create(m_source->mediaElement(), ActiveDOMObject::scriptExecutionContext());

    return m_textTracks.get();
}

void SourceBuffer::setActive(bool active)
{
    if (m_active == active)
        return;

    m_active = active;
    m_private->setActive(active);
    if (!isRemoved())
        m_source->sourceBufferDidChangeAcitveState(this, active);
}

void SourceBuffer::sourceBufferPrivateDidEndStream(SourceBufferPrivate*, const WTF::AtomicString& error)
{
    LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidEndStream(%p) - result = %s", this, String(error).utf8().data());

    if (!isRemoved())
        m_source->streamEndedWithError(error, IgnorableExceptionCode());
}

void SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(SourceBufferPrivate*, const InitializationSegment& segment)
{
    if (isRemoved())
        return;

    LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveInitializationSegment(%p)", this);

    // 3.5.7 Initialization Segment Received
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-init-segment-received
    // 1. Update the duration attribute if it currently equals NaN:
    if (m_source->duration().isInvalid()) {
        // ↳ If the initialization segment contains a duration:
        //   Run the duration change algorithm with new duration set to the duration in the initialization segment.
        // ↳ Otherwise:
        //   Run the duration change algorithm with new duration set to positive Infinity.
        MediaTime newDuration = segment.duration.isValid() ? segment.duration : MediaTime::positiveInfiniteTime();
        m_source->setDurationInternal(newDuration);
    }

    // 2. If the initialization segment has no audio, video, or text tracks, then run the end of stream
    // algorithm with the error parameter set to "decode" and abort these steps.
    if (!segment.audioTracks.size() && !segment.videoTracks.size() && !segment.textTracks.size())
        m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());

    // 3. If the first initialization segment flag is true, then run the following steps:
    if (m_receivedFirstInitializationSegment) {
        if (!validateInitializationSegment(segment)) {
            m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
            return;
        }
        // 3.2 Add the appropriate track descriptions from this initialization segment to each of the track buffers.
        ASSERT(segment.audioTracks.size() == audioTracks()->length());
        for (auto& audioTrackInfo : segment.audioTracks) {
            if (audioTracks()->length() == 1) {
                audioTracks()->item(0)->setPrivate(audioTrackInfo.track);
                break;
            }

            auto audioTrack = audioTracks()->getTrackById(audioTrackInfo.track->id());
            ASSERT(audioTrack);
            audioTrack->setPrivate(audioTrackInfo.track);
        }

        ASSERT(segment.videoTracks.size() == videoTracks()->length());
        for (auto& videoTrackInfo : segment.videoTracks) {
            if (videoTracks()->length() == 1) {
                videoTracks()->item(0)->setPrivate(videoTrackInfo.track);
                break;
            }

            auto videoTrack = videoTracks()->getTrackById(videoTrackInfo.track->id());
            ASSERT(videoTrack);
            videoTrack->setPrivate(videoTrackInfo.track);
        }

        ASSERT(segment.textTracks.size() == textTracks()->length());
        for (auto& textTrackInfo : segment.textTracks) {
            if (textTracks()->length() == 1) {
                downcast<InbandTextTrack>(*textTracks()->item(0)).setPrivate(textTrackInfo.track);
                break;
            }

            auto textTrack = textTracks()->getTrackById(textTrackInfo.track->id());
            ASSERT(textTrack);
            downcast<InbandTextTrack>(*textTrack).setPrivate(textTrackInfo.track);
        }

        for (auto& trackBuffer : m_trackBufferMap.values())
            trackBuffer.needRandomAccessFlag = true;
    }

    // 4. Let active track flag equal false.
    bool activeTrackFlag = false;

    // 5. If the first initialization segment flag is false, then run the following steps:
    if (!m_receivedFirstInitializationSegment) {
        // 5.1 If the initialization segment contains tracks with codecs the user agent does not support,
        // then run the end of stream algorithm with the error parameter set to "decode" and abort these steps.
        // NOTE: This check is the responsibility of the SourceBufferPrivate.

        // 5.2 For each audio track in the initialization segment, run following steps:
        for (auto& audioTrackInfo : segment.audioTracks) {
            AudioTrackPrivate* audioTrackPrivate = audioTrackInfo.track.get();

            // 5.2.1 Let new audio track be a new AudioTrack object.
            // 5.2.2 Generate a unique ID and assign it to the id property on new audio track.
            RefPtr<AudioTrack> newAudioTrack = AudioTrack::create(this, audioTrackPrivate);
            newAudioTrack->setSourceBuffer(this);

            // 5.2.3 If audioTracks.length equals 0, then run the following steps:
            if (!audioTracks()->length()) {
                // 5.2.3.1 Set the enabled property on new audio track to true.
                newAudioTrack->setEnabled(true);

                // 5.2.3.2 Set active track flag to true.
                activeTrackFlag = true;
            }

            // 5.2.4 Add new audio track to the audioTracks attribute on this SourceBuffer object.
            // 5.2.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
            // referenced by the audioTracks attribute on this SourceBuffer object.
            audioTracks()->append(newAudioTrack);

            // 5.2.6 Add new audio track to the audioTracks attribute on the HTMLMediaElement.
            // 5.2.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the AudioTrackList object
            // referenced by the audioTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->audioTracks()->append(newAudioTrack);

            // 5.2.8 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(newAudioTrack->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(newAudioTrack->id(), TrackBuffer()).iterator->value;

            // 5.2.9 Add the track description for this track to the track buffer.
            trackBuffer.description = audioTrackInfo.description;

            m_audioCodecs.append(trackBuffer.description->codec());
        }

        // 5.3 For each video track in the initialization segment, run following steps:
        for (auto& videoTrackInfo : segment.videoTracks) {
            VideoTrackPrivate* videoTrackPrivate = videoTrackInfo.track.get();

            // 5.3.1 Let new video track be a new VideoTrack object.
            // 5.3.2 Generate a unique ID and assign it to the id property on new video track.
            RefPtr<VideoTrack> newVideoTrack = VideoTrack::create(this, videoTrackPrivate);
            newVideoTrack->setSourceBuffer(this);

            // 5.3.3 If videoTracks.length equals 0, then run the following steps:
            if (!videoTracks()->length()) {
                // 5.3.3.1 Set the selected property on new video track to true.
                newVideoTrack->setSelected(true);

                // 5.3.3.2 Set active track flag to true.
                activeTrackFlag = true;
            }

            // 5.3.4 Add new video track to the videoTracks attribute on this SourceBuffer object.
            // 5.3.5 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
            // referenced by the videoTracks attribute on this SourceBuffer object.
            videoTracks()->append(newVideoTrack);

            // 5.3.6 Add new video track to the videoTracks attribute on the HTMLMediaElement.
            // 5.3.7 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the VideoTrackList object
            // referenced by the videoTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->videoTracks()->append(newVideoTrack);

            // 5.3.8 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(newVideoTrack->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(newVideoTrack->id(), TrackBuffer()).iterator->value;

            // 5.3.9 Add the track description for this track to the track buffer.
            trackBuffer.description = videoTrackInfo.description;

            m_videoCodecs.append(trackBuffer.description->codec());
        }

        // 5.4 For each text track in the initialization segment, run following steps:
        for (auto& textTrackInfo : segment.textTracks) {
            InbandTextTrackPrivate* textTrackPrivate = textTrackInfo.track.get();

            // 5.4.1 Let new text track be a new TextTrack object with its properties populated with the
            // appropriate information from the initialization segment.
            RefPtr<InbandTextTrack> newTextTrack = InbandTextTrack::create(scriptExecutionContext(), this, textTrackPrivate);

            // 5.4.2 If the mode property on new text track equals "showing" or "hidden", then set active
            // track flag to true.
            if (textTrackPrivate->mode() != InbandTextTrackPrivate::Disabled)
                activeTrackFlag = true;

            // 5.4.3 Add new text track to the textTracks attribute on this SourceBuffer object.
            // 5.4.4 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at textTracks attribute on this
            // SourceBuffer object.
            textTracks()->append(newTextTrack);

            // 5.4.5 Add new text track to the textTracks attribute on the HTMLMediaElement.
            // 5.4.6 Queue a task to fire a trusted event named addtrack, that does not bubble and is
            // not cancelable, and that uses the TrackEvent interface, at the TextTrackList object
            // referenced by the textTracks attribute on the HTMLMediaElement.
            m_source->mediaElement()->textTracks()->append(newTextTrack);

            // 5.4.7 Create a new track buffer to store coded frames for this track.
            ASSERT(!m_trackBufferMap.contains(textTrackPrivate->id()));
            TrackBuffer& trackBuffer = m_trackBufferMap.add(textTrackPrivate->id(), TrackBuffer()).iterator->value;

            // 5.4.8 Add the track description for this track to the track buffer.
            trackBuffer.description = textTrackInfo.description;

            m_textCodecs.append(trackBuffer.description->codec());
        }

        // 5.5 If active track flag equals true, then run the following steps:
        if (activeTrackFlag) {
            // 5.5.1 Add this SourceBuffer to activeSourceBuffers.
            setActive(true);
        }

        // 5.6 Set first initialization segment flag to true.
        m_receivedFirstInitializationSegment = true;
    }

    // 6. If the HTMLMediaElement.readyState attribute is HAVE_NOTHING, then run the following steps:
    if (m_private->readyState() == MediaPlayer::HaveNothing) {
        // 6.1 If one or more objects in sourceBuffers have first initialization segment flag set to false, then abort these steps.
        for (auto& sourceBuffer : *m_source->sourceBuffers()) {
            if (!sourceBuffer->m_receivedFirstInitializationSegment)
                return;
        }

        // 6.2 Set the HTMLMediaElement.readyState attribute to HAVE_METADATA.
        // 6.3 Queue a task to fire a simple event named loadedmetadata at the media element.
        m_private->setReadyState(MediaPlayer::HaveMetadata);
    }

    // 7. If the active track flag equals true and the HTMLMediaElement.readyState
    // attribute is greater than HAVE_CURRENT_DATA, then set the HTMLMediaElement.readyState
    // attribute to HAVE_METADATA.
    if (activeTrackFlag && m_private->readyState() > MediaPlayer::HaveCurrentData)
        m_private->setReadyState(MediaPlayer::HaveMetadata);
}

bool SourceBuffer::validateInitializationSegment(const InitializationSegment& segment)
{
    // 3.5.7 Initialization Segment Received (ctd)
    // https://dvcs.w3.org/hg/html-media/raw-file/default/media-source/media-source.html#sourcebuffer-init-segment-received

    // 3.1. Verify the following properties. If any of the checks fail then run the end of stream
    // algorithm with the error parameter set to "decode" and abort these steps.
    //   * The number of audio, video, and text tracks match what was in the first initialization segment.
    if (segment.audioTracks.size() != audioTracks()->length()
        || segment.videoTracks.size() != videoTracks()->length()
        || segment.textTracks.size() != textTracks()->length())
        return false;

    //   * The codecs for each track match what was specified in the first initialization segment.
    for (auto& audioTrackInfo : segment.audioTracks) {
        if (!m_audioCodecs.contains(audioTrackInfo.description->codec()))
            return false;
    }

    for (auto& videoTrackInfo : segment.videoTracks) {
        if (!m_videoCodecs.contains(videoTrackInfo.description->codec()))
            return false;
    }

    for (auto& textTrackInfo : segment.textTracks) {
        if (!m_textCodecs.contains(textTrackInfo.description->codec()))
            return false;
    }

    //   * If more than one track for a single type are present (i.e. 2 audio tracks), then the Track
    //   IDs match the ones in the first initialization segment.
    if (segment.audioTracks.size() >= 2) {
        for (auto& audioTrackInfo : segment.audioTracks) {
            if (!m_trackBufferMap.contains(audioTrackInfo.track->id()))
                return false;
        }
    }

    if (segment.videoTracks.size() >= 2) {
        for (auto& videoTrackInfo : segment.videoTracks) {
            if (!m_trackBufferMap.contains(videoTrackInfo.track->id()))
                return false;
        }
    }

    if (segment.textTracks.size() >= 2) {
        for (auto& textTrackInfo : segment.textTracks) {
            if (!m_trackBufferMap.contains(textTrackInfo.track->id()))
                return false;
        }
    }

    return true;
}

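// Heterogeneous comparator: the overloads below let binary-search algorithms such as
// std::lower_bound and std::equal_range compare a bare MediaTime against
// (MediaTime, sample) pairs directly; its call sites fall outside this excerpt.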
1147 class SampleLessThanComparator {
1148 public:
1149     bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
1150     {
1151         return value1.first < value2.first;
1152     }
1153
1154     bool operator()(MediaTime value1, std::pair<MediaTime, RefPtr<MediaSample>> value2)
1155     {
1156         return value1 < value2.first;
1157     }
1158
1159     bool operator()(std::pair<MediaTime, RefPtr<MediaSample>> value1, MediaTime value2)
1160     {
1161         return value1.first < value2;
1162     }
1163 };
1164
1165 void SourceBuffer::sourceBufferPrivateDidReceiveSample(SourceBufferPrivate*, PassRefPtr<MediaSample> prpSample)
1166 {
1167     if (isRemoved())
1168         return;
1169
1170     RefPtr<MediaSample> sample = prpSample;
1171
1172     // 3.5.8 Coded Frame Processing
1173     // When complete coded frames have been parsed by the segment parser loop then the following steps
1174     // are run:
1175     // 1. For each coded frame in the media segment run the following steps:
1176     // 1.1. Loop Top
1177     do {
1178         // 1.1 (ctd) Let presentation timestamp be a double precision floating point representation of
1179         // the coded frame's presentation timestamp in seconds.
1180         MediaTime presentationTimestamp = sample->presentationTime();
1181
1182         // 1.2 Let decode timestamp be a double precision floating point representation of the coded frame's
1183         // decode timestamp in seconds.
1184         MediaTime decodeTimestamp = sample->decodeTime();
1185
1186         // 1.3 Let frame duration be a double precision floating point representation of the coded frame's
1187         // duration in seconds.
1188         MediaTime frameDuration = sample->duration();
1189
1190         // 1.4 If mode equals "sequence" and group start timestamp is set, then run the following steps:
1191         // FIXME: add support for "sequence" mode
1192
1193         // 1.5 If timestampOffset is not 0, then run the following steps:
1194         if (m_timestampOffset) {
1195             // 1.5.1 Add timestampOffset to the presentation timestamp.
1196             presentationTimestamp += m_timestampOffset;
1197
1198             // 1.5.2 Add timestampOffset to the decode timestamp.
1199             decodeTimestamp += m_timestampOffset;
1200
1201             // 1.5.3 If the presentation timestamp or decode timestamp is less than the presentation start
1202             // time, then run the end of stream algorithm with the error parameter set to "decode", and
1203             // abort these steps.
1204             MediaTime presentationStartTime = MediaTime::zeroTime();
1205             if (presentationTimestamp < presentationStartTime || decodeTimestamp < presentationStartTime) {
1206 #if !LOG_DISABLED
1207                 LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidReceiveSample(%p) - failing because %s", this, presentationTimestamp < presentationStartTime ? "presentationTimestamp < presentationStartTime" : "decodeTimestamp < presentationStartTime");
1208 #endif
1209                 m_source->streamEndedWithError(decodeError(), IgnorableExceptionCode());
1210                 return;
1211             }
1212         }
1213
1214         // 1.6 Let track buffer equal the track buffer that the coded frame will be added to.
1215         AtomicString trackID = sample->trackID();
1216         auto it = m_trackBufferMap.find(trackID);
1217         if (it == m_trackBufferMap.end())
1218             it = m_trackBufferMap.add(trackID, TrackBuffer()).iterator;
1219         TrackBuffer& trackBuffer = it->value;
1220
1221         // 1.7 If last decode timestamp for track buffer is set and decode timestamp is less than last
1222         // decode timestamp:
1223         // OR
1224         // If last decode timestamp for track buffer is set and the difference between decode timestamp and
1225         // last decode timestamp is greater than 2 times last frame duration:
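             // NOTE: Both conditions detect a timestamp discontinuity in the incoming stream: either
             // decode time jumped backwards, or it jumped forward by more than two frame durations.
             // In that case the per-track decode state is reset below and the frame is reprocessed.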
1226         if (trackBuffer.lastDecodeTimestamp.isValid() && (decodeTimestamp < trackBuffer.lastDecodeTimestamp
1227             || abs(decodeTimestamp - trackBuffer.lastDecodeTimestamp) > (trackBuffer.lastFrameDuration * 2))) {
1228             // 1.7.1 If mode equals "segments":
1229             // Set highest presentation end timestamp to presentation timestamp.
1230             m_highestPresentationEndTimestamp = presentationTimestamp;
1231
1232             // If mode equals "sequence":
1233             // Set group start timestamp equal to the highest presentation end timestamp.
1234             // FIXME: Add support for "sequence" mode.
1235
1236             for (auto& trackBuffer : m_trackBufferMap.values()) {
1237                 // 1.7.2 Unset the last decode timestamp on all track buffers.
1238                 trackBuffer.lastDecodeTimestamp = MediaTime::invalidTime();
1239                 // 1.7.3 Unset the last frame duration on all track buffers.
1240                 trackBuffer.lastFrameDuration = MediaTime::invalidTime();
1241                 // 1.7.4 Unset the highest presentation timestamp on all track buffers.
1242                 trackBuffer.highestPresentationTimestamp = MediaTime::invalidTime();
1243                 // 1.7.5 Set the need random access point flag on all track buffers to true.
1244                 trackBuffer.needRandomAccessFlag = true;
1245             }
1246
1247             // 1.7.6 Jump to the Loop Top step above to restart processing of the current coded frame.
1248             continue;
1249         }
1250
1251         // 1.8 Let frame end timestamp equal the sum of presentation timestamp and frame duration.
1252         MediaTime frameEndTimestamp = presentationTimestamp + frameDuration;
1253
1254         // 1.9 If presentation timestamp is less than appendWindowStart, then set the need random access
1255         // point flag to true, drop the coded frame, and jump to the top of the loop to start processing
1256         // the next coded frame.
1257         // 1.10 If frame end timestamp is greater than appendWindowEnd, then set the need random access
1258         // point flag to true, drop the coded frame, and jump to the top of the loop to start processing
1259         // the next coded frame.
1260         // FIXME: implement append windows
1261
1262         // 1.11 If the need random access point flag on track buffer equals true, then run the following steps:
1263         if (trackBuffer.needRandomAccessFlag) {
1264             // 1.11.1 If the coded frame is not a random access point, then drop the coded frame and jump
1265             // to the top of the loop to start processing the next coded frame.
1266             if (!sample->isSync()) {
1267                 didDropSample();
1268                 return;
1269             }
1270
1271             // 1.11.2 Set the need random access point flag on track buffer to false.
1272             trackBuffer.needRandomAccessFlag = false;
1273         }
1274
1275         // 1.12 Let spliced audio frame be an unset variable for holding audio splice information
1276         // 1.13 Let spliced timed text frame be an unset variable for holding timed text splice information
1277         // FIXME: Add support for sample splicing.
1278
1279         SampleMap erasedSamples;
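         // NOTE: MediaTime is a rational time value (value / timescale), so MediaTime(1, 1000000)
         // below represents exactly one microsecond with no floating-point rounding.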
1280         MediaTime microsecond(1, 1000000);
1281
1282         // 1.14 If last decode timestamp for track buffer is unset and presentation timestamp
1283         // falls within the presentation interval of a coded frame in track buffer, then run the
1284         // following steps:
1285         if (trackBuffer.lastDecodeTimestamp.isInvalid()) {
1286             auto iter = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(presentationTimestamp);
1287             if (iter != trackBuffer.samples.presentationOrder().end()) {
1288                 // 1.14.1 Let overlapped frame be the coded frame in track buffer that matches the condition above.
1289                 RefPtr<MediaSample> overlappedFrame = iter->second;
1290
1291                 // 1.14.2 If track buffer contains audio coded frames:
1292                 // Run the audio splice frame algorithm and if a splice frame is returned, assign it to
1293                 // spliced audio frame.
1294                 // FIXME: Add support for sample splicing.
1295
1296                 // If track buffer contains video coded frames:
1297                 if (trackBuffer.description->isVideo()) {
1298                     // 1.14.2.1 Let overlapped frame presentation timestamp equal the presentation timestamp
1299                     // of overlapped frame.
1300                     MediaTime overlappedFramePresentationTimestamp = overlappedFrame->presentationTime();
1301
1302                     // 1.14.2.2 Let remove window timestamp equal overlapped frame presentation timestamp
1303                     // plus 1 microsecond.
1304                     MediaTime removeWindowTimestamp = overlappedFramePresentationTimestamp + microsecond;
1305
1306                     // 1.14.2.3 If the presentation timestamp is less than the remove window timestamp,
1307                     // then remove overlapped frame and any coded frames that depend on it from track buffer.
1308                     if (presentationTimestamp < removeWindowTimestamp)
1309                         erasedSamples.addSample(iter->second);
1310                 }
1311
1312                 // If track buffer contains timed text coded frames:
1313                 // Run the text splice frame algorithm and if a splice frame is returned, assign it to spliced timed text frame.
1314                 // FIXME: Add support for sample splicing.
1315             }
1316         }
1317
1318         // 1.15 Remove existing coded frames in track buffer:
1319         // If highest presentation timestamp for track buffer is not set:
1320         if (trackBuffer.highestPresentationTimestamp.isInvalid()) {
1321             // Remove all coded frames from track buffer that have a presentation timestamp greater than or
1322             // equal to presentation timestamp and less than frame end timestamp.
1323             auto iterPair = trackBuffer.samples.presentationOrder().findSamplesBetweenPresentationTimes(presentationTimestamp, frameEndTimestamp);
1324             if (iterPair.first != trackBuffer.samples.presentationOrder().end())
1325                 erasedSamples.addRange(iterPair.first, iterPair.second);
1326         }
1327
1328         // If highest presentation timestamp for track buffer is set and less than or equal to presentation timestamp
1329         if (trackBuffer.highestPresentationTimestamp.isValid() && trackBuffer.highestPresentationTimestamp <= presentationTimestamp) {
1330             // Remove all coded frames from track buffer that have a presentation timestamp greater than highest
1331             // presentation timestamp and less than or equal to frame end timestamp.
1332             do {
1333                 // NOTE: Searching from the end of the trackBuffer will be vastly more efficient if the search range is
1334                 // near the end of the buffered range. Use a linear-backwards search if the search range is within one
1335                 // frame duration of the end:
1336                 if (!m_buffered)
1337                     break;
1338
1339                 unsigned bufferedLength = m_buffered->ranges().length();
1340                 if (!bufferedLength)
1341                     break;
1342
1343                 bool ignoreValid;
1344                 MediaTime highestBufferedTime = m_buffered->ranges().end(bufferedLength - 1, ignoreValid);
1345
1346                 PresentationOrderSampleMap::iterator_range range;
1347                 if (highestBufferedTime - trackBuffer.highestPresentationTimestamp < trackBuffer.lastFrameDuration)
1348                     range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRangeFromEnd(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
1349                 else
1350                     range = trackBuffer.samples.presentationOrder().findSamplesWithinPresentationRange(trackBuffer.highestPresentationTimestamp, frameEndTimestamp);
1351
1352                 if (range.first != trackBuffer.samples.presentationOrder().end())
1353                     erasedSamples.addRange(range.first, range.second);
1354             } while (false);
1355         }
1356
1357         // 1.16 Remove decoding dependencies of the coded frames removed in the previous step:
1358         DecodeOrderSampleMap::MapType dependentSamples;
1359         if (!erasedSamples.empty()) {
1360             // If detailed information about decoding dependencies is available:
1361             // FIXME: Add support for detailed dependency information
1362
1363             // Otherwise: Remove all coded frames between the coded frames removed in the previous step
1364             // and the next random access point after those removed frames.
1365             auto firstDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().begin()->first);
1366             auto lastDecodeIter = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(erasedSamples.decodeOrder().rbegin()->first);
1367             auto nextSyncIter = trackBuffer.samples.decodeOrder().findSyncSampleAfterDecodeIterator(lastDecodeIter);
1368             dependentSamples.insert(firstDecodeIter, nextSyncIter);
1369
1370             RefPtr<TimeRanges> erasedRanges = removeSamplesFromTrackBuffer(dependentSamples, trackBuffer, this, "sourceBufferPrivateDidReceiveSample");
1371
1372             // Only force the TrackBuffer to re-enqueue if the removed ranges overlap with enqueued and possibly
1373             // not yet displayed samples.
1374             MediaTime currentMediaTime = m_source->currentTime();
1375             if (currentMediaTime < trackBuffer.lastEnqueuedPresentationTime) {
1376                 PlatformTimeRanges possiblyEnqueuedRanges(currentMediaTime, trackBuffer.lastEnqueuedPresentationTime);
1377                 possiblyEnqueuedRanges.intersectWith(erasedRanges->ranges());
1378                 if (possiblyEnqueuedRanges.length())
1379                     trackBuffer.needsReenqueueing = true;
1380             }
1381
1382             erasedRanges->invert();
1383             m_buffered->intersectWith(*erasedRanges);
1384         }
1385
1386         // 1.17 If spliced audio frame is set:
1387         // Add spliced audio frame to the track buffer.
1388         // If spliced timed text frame is set:
1389         // Add spliced timed text frame to the track buffer.
1390         // FIXME: Add support for sample splicing.
1391
1392         // Otherwise:
1393         // Add the coded frame with the presentation timestamp, decode timestamp, and frame duration to the track buffer.
1394         trackBuffer.samples.addSample(sample);
1395
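         // NOTE: Add the sample to the decode queue only if it extends the enqueued decode timeline
         // monotonically; samples arriving with earlier decode timestamps are still stored in the
         // sample map and will be picked up if the track is re-enqueued.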
1396         if (trackBuffer.lastEnqueuedDecodeEndTime.isInvalid() || decodeTimestamp >= trackBuffer.lastEnqueuedDecodeEndTime) {
1397             DecodeOrderSampleMap::KeyType decodeKey(decodeTimestamp, presentationTimestamp);
1398             trackBuffer.decodeQueue.insert(DecodeOrderSampleMap::MapType::value_type(decodeKey, sample));
1399         }
1400
1401         // 1.18 Set last decode timestamp for track buffer to decode timestamp.
1402         trackBuffer.lastDecodeTimestamp = decodeTimestamp;
1403
1404         // 1.19 Set last frame duration for track buffer to frame duration.
1405         trackBuffer.lastFrameDuration = frameDuration;
1406
1407         // 1.20 If highest presentation timestamp for track buffer is unset or frame end timestamp is greater
1408         // than highest presentation timestamp, then set highest presentation timestamp for track buffer
1409         // to frame end timestamp.
1410         if (trackBuffer.highestPresentationTimestamp.isInvalid() || frameEndTimestamp > trackBuffer.highestPresentationTimestamp)
1411             trackBuffer.highestPresentationTimestamp = frameEndTimestamp;
1412
1413         // 1.21 If highest presentation end timestamp is unset or frame end timestamp is greater than highest
1414         // presentation end timestamp, then set highest presentation end timestamp equal to frame end timestamp.
1415         if (m_highestPresentationEndTimestamp.isInvalid() || frameEndTimestamp > m_highestPresentationEndTimestamp)
1416             m_highestPresentationEndTimestamp = frameEndTimestamp;
1417
1418         m_buffered->add(presentationTimestamp.toDouble(), (presentationTimestamp + frameDuration + microsecond).toDouble());
1419         m_bufferedSinceLastMonitor += frameDuration.toDouble();
1420
1421         break;
1422     } while (1);
1423
1424     // Steps 2-4 will be handled by MediaSource::monitorSourceBuffers()
1425
1426     // 5. If the media segment contains data beyond the current duration, then run the duration change algorithm with new
1427     // duration set to the maximum of the current duration and the highest end timestamp reported by HTMLMediaElement.buffered.
1428     if (highestPresentationEndTimestamp() > m_source->duration())
1429         m_source->setDurationInternal(highestPresentationEndTimestamp());
1430 }
1431
1432 bool SourceBuffer::hasAudio() const
1433 {
1434     return m_audioTracks && m_audioTracks->length();
1435 }
1436
1437 bool SourceBuffer::hasVideo() const
1438 {
1439     return m_videoTracks && m_videoTracks->length();
1440 }
1441
1442 bool SourceBuffer::sourceBufferPrivateHasAudio(const SourceBufferPrivate*) const
1443 {
1444     return hasAudio();
1445 }
1446
1447 bool SourceBuffer::sourceBufferPrivateHasVideo(const SourceBufferPrivate*) const
1448 {
1449     return hasVideo();
1450 }
1451
1452 void SourceBuffer::videoTrackSelectedChanged(VideoTrack* track)
1453 {
1454     // 2.4.5 Changes to selected/enabled track state
1455     // If the selected video track changes, then run the following steps:
1456     // 1. If the SourceBuffer associated with the previously selected video track is not associated with
1457     // any other enabled tracks, run the following steps:
1458     if (!track->selected()
1459         && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1460         && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1461         && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1462         // 1.1 Remove the SourceBuffer from activeSourceBuffers.
1463         // 1.2 Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
1464         setActive(false);
1465     } else if (track->selected()) {
1466         // 2. If the SourceBuffer associated with the newly selected video track is not already in activeSourceBuffers,
1467         // run the following steps:
1468         // 2.1 Add the SourceBuffer to activeSourceBuffers.
1469         // 2.2 Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
1470         setActive(true);
1471     }
1472
1473     if (!isRemoved())
1474         m_source->mediaElement()->videoTrackSelectedChanged(track);
1475 }
1476
1477 void SourceBuffer::audioTrackEnabledChanged(AudioTrack* track)
1478 {
1479     // 2.4.5 Changes to selected/enabled track state
1480     // If an audio track becomes disabled and the SourceBuffer associated with this track is not
1481     // associated with any other enabled or selected track, then run the following steps:
1482     if (!track->enabled()
1483         && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1484         && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1485         && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1486         // 1. Remove the SourceBuffer associated with the audio track from activeSourceBuffers
1487         // 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
1488         setActive(false);
1489     } else if (track->enabled()) {
1490         // If an audio track becomes enabled and the SourceBuffer associated with this track is
1491         // not already in activeSourceBuffers, then run the following steps:
1492         // 1. Add the SourceBuffer associated with the audio track to activeSourceBuffers
1493         // 2. Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
1494         setActive(true);
1495     }
1496
1497     if (!isRemoved())
1498         m_source->mediaElement()->audioTrackEnabledChanged(track);
1499 }
1500
1501 void SourceBuffer::textTrackModeChanged(TextTrack* track)
1502 {
1503     // 2.4.5 Changes to selected/enabled track state
1504     // If a text track mode becomes "disabled" and the SourceBuffer associated with this track is not
1505     // associated with any other enabled or selected track, then run the following steps:
1506     if (track->mode() == TextTrack::disabledKeyword()
1507         && (!m_videoTracks || !m_videoTracks->isAnyTrackEnabled())
1508         && (!m_audioTracks || !m_audioTracks->isAnyTrackEnabled())
1509         && (!m_textTracks || !m_textTracks->isAnyTrackEnabled())) {
1510         // 1. Remove the SourceBuffer associated with the text track from activeSourceBuffers
1511         // 2. Queue a task to fire a simple event named removesourcebuffer at activeSourceBuffers
1512         setActive(false);
1513     } else {
1514         // If a text track mode becomes "showing" or "hidden" and the SourceBuffer associated with this
1515         // track is not already in activeSourceBuffers, then run the following steps:
1516         // 1. Add the SourceBuffer associated with the text track to activeSourceBuffers
1517         // 2. Queue a task to fire a simple event named addsourcebuffer at activeSourceBuffers
1518         setActive(true);
1519     }
1520
1521     if (!isRemoved())
1522         m_source->mediaElement()->textTrackModeChanged(track);
1523 }
1524
1525 void SourceBuffer::textTrackAddCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue)
1526 {
1527     if (!isRemoved())
1528         m_source->mediaElement()->textTrackAddCue(track, cue);
1529 }
1530
1531 void SourceBuffer::textTrackAddCues(TextTrack* track, const TextTrackCueList* cueList)
1532 {
1533     if (!isRemoved())
1534         m_source->mediaElement()->textTrackAddCues(track, cueList);
1535 }
1536
1537 void SourceBuffer::textTrackRemoveCue(TextTrack* track, WTF::PassRefPtr<TextTrackCue> cue)
1538 {
1539     if (!isRemoved())
1540         m_source->mediaElement()->textTrackRemoveCue(track, cue);
1541 }
1542
1543 void SourceBuffer::textTrackRemoveCues(TextTrack* track, const TextTrackCueList* cueList)
1544 {
1545     if (!isRemoved())
1546         m_source->mediaElement()->textTrackRemoveCues(track, cueList);
1547 }
1548
1549 void SourceBuffer::textTrackKindChanged(TextTrack* track)
1550 {
1551     if (!isRemoved())
1552         m_source->mediaElement()->textTrackKindChanged(track);
1553 }
1554
1555 void SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(SourceBufferPrivate*, AtomicString trackID)
1556 {
1557     LOG(MediaSource, "SourceBuffer::sourceBufferPrivateDidBecomeReadyForMoreSamples(%p)", this);
1558     auto it = m_trackBufferMap.find(trackID);
1559     if (it == m_trackBufferMap.end())
1560         return;
1561
1562     TrackBuffer& trackBuffer = it->value;
1563     if (!trackBuffer.needsReenqueueing && !m_source->isSeeking())
1564         provideMediaData(trackBuffer, trackID);
1565 }
1566
1567 void SourceBuffer::provideMediaData(TrackBuffer& trackBuffer, AtomicString trackID)
1568 {
1569 #if !LOG_DISABLED
1570     unsigned enqueuedSamples = 0;
1571 #endif
1572
1573     auto sampleIt = trackBuffer.decodeQueue.begin();
1574     for (auto sampleEnd = trackBuffer.decodeQueue.end(); sampleIt != sampleEnd; ++sampleIt) {
1575         if (!m_private->isReadyForMoreSamples(trackID)) {
1576             m_private->notifyClientWhenReadyForMoreSamples(trackID);
1577             break;
1578         }
1579
1580         RefPtr<MediaSample> sample = sampleIt->second;
1581         // Do not enqueue samples spanning a significant unbuffered gap.
1582         // NOTE: one second is somewhat arbitrary. MediaSource::monitorSourceBuffers() is run
1583         // on the playbackTimer, which is effectively every 350ms. Allowing > 350ms gap between
1584         // enqueued samples allows for situations where we overrun the end of a buffered range
1585         // but don't notice for 350ms of playback time, and the client can enqueue data for the
1586         // new current time without triggering this early return.
1587         // FIXME(135867): Make this gap detection logic less arbitrary.
1588         MediaTime oneSecond(1, 1);
1589         if (trackBuffer.lastEnqueuedDecodeEndTime.isValid() && sample->decodeTime() - trackBuffer.lastEnqueuedDecodeEndTime > oneSecond)
1590             break;
1591
1592         trackBuffer.lastEnqueuedPresentationTime = sample->presentationTime();
1593         trackBuffer.lastEnqueuedDecodeEndTime = sample->decodeTime() + sample->duration();
1594         m_private->enqueueSample(sample.release(), trackID);
1595 #if !LOG_DISABLED
1596         ++enqueuedSamples;
1597 #endif
1598     }
1600     trackBuffer.decodeQueue.erase(trackBuffer.decodeQueue.begin(), sampleIt);
1601
1602     LOG(MediaSource, "SourceBuffer::provideMediaData(%p) - Enqueued %u samples", this, enqueuedSamples);
1603 }
1604
1605 void SourceBuffer::reenqueueMediaForTime(TrackBuffer& trackBuffer, AtomicString trackID, const MediaTime& time)
1606 {
1607     // Find the sample which contains the current presentation time.
1608     auto currentSamplePTSIterator = trackBuffer.samples.presentationOrder().findSampleContainingPresentationTime(time);
1609
1610     if (currentSamplePTSIterator == trackBuffer.samples.presentationOrder().end()) {
1611         trackBuffer.decodeQueue.clear();
1612         m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
1613         return;
1614     }
1615
1616     // Search backwards for the previous sync sample.
1617     DecodeOrderSampleMap::KeyType decodeKey(currentSamplePTSIterator->second->decodeTime(), currentSamplePTSIterator->second->presentationTime());
1618     auto currentSampleDTSIterator = trackBuffer.samples.decodeOrder().findSampleWithDecodeKey(decodeKey);
1619     ASSERT(currentSampleDTSIterator != trackBuffer.samples.decodeOrder().end());
1620
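     // NOTE: std::reverse_iterator(it) dereferences to the element *before* it, so decrementing the
     // freshly constructed reverse iterator repositions it onto the current sample itself before the
     // backwards walk to the nearest preceding sync sample.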
1621     auto reverseCurrentSampleIter = --DecodeOrderSampleMap::reverse_iterator(currentSampleDTSIterator);
1622     auto reverseLastSyncSampleIter = trackBuffer.samples.decodeOrder().findSyncSamplePriorToDecodeIterator(reverseCurrentSampleIter);
1623     if (reverseLastSyncSampleIter == trackBuffer.samples.decodeOrder().rend()) {
1624         trackBuffer.decodeQueue.clear();
1625         m_private->flushAndEnqueueNonDisplayingSamples(Vector<RefPtr<MediaSample>>(), trackID);
1626         return;
1627     }
1628
1629     Vector<RefPtr<MediaSample>> nonDisplayingSamples;
1630     for (auto iter = reverseLastSyncSampleIter; iter != reverseCurrentSampleIter; --iter)
1631         nonDisplayingSamples.append(iter->second);
1632
1633     m_private->flushAndEnqueueNonDisplayingSamples(nonDisplayingSamples, trackID);
1634
1635     if (!nonDisplayingSamples.isEmpty()) {
1636         trackBuffer.lastEnqueuedPresentationTime = nonDisplayingSamples.last()->presentationTime();
1637         trackBuffer.lastEnqueuedDecodeEndTime = nonDisplayingSamples.last()->decodeTime();
1638     } else {
1639         trackBuffer.lastEnqueuedPresentationTime = MediaTime::invalidTime();
1640         trackBuffer.lastEnqueuedDecodeEndTime = MediaTime::invalidTime();
1641     }
1642
1643     // Fill the decode queue with the remaining samples.
1644     trackBuffer.decodeQueue.clear();
1645     for (auto iter = currentSampleDTSIterator; iter != trackBuffer.samples.decodeOrder().end(); ++iter)
1646         trackBuffer.decodeQueue.insert(*iter);
1647     provideMediaData(trackBuffer, trackID);
1648
1649     trackBuffer.needsReenqueueing = false;
1650 }
1651
1653 void SourceBuffer::didDropSample()
1654 {
1655     if (!isRemoved())
1656         m_source->mediaElement()->incrementDroppedFrameCount();
1657 }
1658
1659 void SourceBuffer::monitorBufferingRate()
1660 {
1661     if (!m_bufferedSinceLastMonitor)
1662         return;
1663
1664     double now = monotonicallyIncreasingTime();
1665     double interval = now - m_timeOfBufferingMonitor;
1666     double rateSinceLastMonitor = m_bufferedSinceLastMonitor / interval;
1667
1668     m_timeOfBufferingMonitor = now;
1669     m_bufferedSinceLastMonitor = 0;
1670
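     // Exponential moving average with ExponentialMovingAverageCoefficient = 0.1:
     // m_averageBufferRate = 0.9 * previousAverage + 0.1 * rateSinceLastMonitor, so the
     // contribution of older intervals decays geometrically and transient bursts are damped.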
1671     m_averageBufferRate = m_averageBufferRate * (1 - ExponentialMovingAverageCoefficient) + rateSinceLastMonitor * ExponentialMovingAverageCoefficient;
1672
1673     LOG(MediaSource, "SourceBuffer::monitorBufferingRate(%p) - m_averageBufferRate: %lf", this, m_averageBufferRate);
1674 }
1675
1676 std::unique_ptr<PlatformTimeRanges> SourceBuffer::bufferedAccountingForEndOfStream() const
1677 {
1678     // FIXME: Revisit this method once the spec bug <https://www.w3.org/Bugs/Public/show_bug.cgi?id=26436> is resolved.
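     // Once the stream has ended no further data will arrive, so treat the gap between the last
     // buffered sample and the duration as virtually buffered for readiness calculations.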
1679     std::unique_ptr<PlatformTimeRanges> virtualRanges = PlatformTimeRanges::create(m_buffered->ranges());
1680     if (m_source->isEnded()) {
1681         MediaTime start = virtualRanges->maximumBufferedTime();
1682         MediaTime end = m_source->duration();
1683         if (start <= end)
1684             virtualRanges->add(start, end);
1685     }
1686     return virtualRanges;
1687 }
1688
1689 bool SourceBuffer::hasCurrentTime() const
1690 {
1691     if (isRemoved() || !m_buffered->length())
1692         return false;
1693
1694     MediaTime currentTime = m_source->currentTime();
1695     MediaTime duration = m_source->duration();
1696     if (currentTime >= duration)
1697         return true;
1698
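     // Otherwise, treat currentTime as buffered when the nearest buffered point is within the
     // fudge factor of one 24fps frame (see currentTimeFudgeFactor()).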
1699     std::unique_ptr<PlatformTimeRanges> ranges = bufferedAccountingForEndOfStream();
1700     return abs(ranges->nearest(currentTime) - currentTime) <= currentTimeFudgeFactor();
1701 }
1702
1703 bool SourceBuffer::hasFutureTime() const
1704 {
1705     if (isRemoved())
1706         return false;
1707
1708     std::unique_ptr<PlatformTimeRanges> ranges = bufferedAccountingForEndOfStream();
1709     if (!ranges->length())
1710         return false;
1711
1712     MediaTime currentTime = m_source->currentTime();
1713     MediaTime duration = m_source->duration();
1714     if (currentTime >= duration)
1715         return true;
1716
1717     MediaTime nearest = ranges->nearest(currentTime);
1718     if (abs(nearest - currentTime) > currentTimeFudgeFactor())
1719         return false;
1720
1721     size_t found = ranges->find(nearest);
1722     if (found == notFound)
1723         return false;
1724
1725     MediaTime localEnd = ranges->end(found);
1726     if (localEnd == duration)
1727         return true;
1728
1729     return localEnd - currentTime > currentTimeFudgeFactor();
1730 }
1731
1732 bool SourceBuffer::canPlayThrough()
1733 {
1734     if (isRemoved())
1735         return false;
1736
1737     monitorBufferingRate();
1738
1739     // Assuming no fluctuations in the buffering rate, loading 1 second per second or greater
1740     // means indefinite playback. This could be improved by taking jitter into account.
1741     if (m_averageBufferRate > 1)
1742         return true;
1743
1744     // Add up all the time yet to be buffered.
1745     MediaTime currentTime = m_source->currentTime();
1746     MediaTime duration = m_source->duration();
1747
1748     std::unique_ptr<PlatformTimeRanges> unbufferedRanges = bufferedAccountingForEndOfStream();
1749     unbufferedRanges->invert();
1750     unbufferedRanges->intersectWith(PlatformTimeRanges(currentTime, std::max(currentTime, duration)));
1751     MediaTime unbufferedTime = unbufferedRanges->totalDuration();
1752     if (!unbufferedTime.isValid())
1753         return true;
1754
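     // The estimated time to fetch the rest is unbufferedTime / m_averageBufferRate (both in
     // seconds); playthrough is possible when that is less than the playback time remaining.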
1755     MediaTime timeRemaining = duration - currentTime;
1756     return unbufferedTime.toDouble() / m_averageBufferRate < timeRemaining.toDouble();
1757 }
1758
1759 size_t SourceBuffer::extraMemoryCost() const
1760 {
1761     size_t extraMemoryCost = m_pendingAppendData.capacity();
1762     for (auto& trackBuffer : m_trackBufferMap.values())
1763         extraMemoryCost += trackBuffer.samples.sizeInBytes();
1764
1765     return extraMemoryCost;
1766 }
1767
1768 void SourceBuffer::reportExtraMemoryCost()
1769 {
1770     size_t extraMemoryCost = this->extraMemoryCost();
1771     if (extraMemoryCost < m_reportedExtraMemoryCost)
1772         return;
1773
1774     size_t extraMemoryCostDelta = extraMemoryCost - m_reportedExtraMemoryCost;
1775     m_reportedExtraMemoryCost = extraMemoryCost;
1776
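     // Report only the growth since the last report; reporting the full cost each time would make
     // the VM heap double-count memory it has already accounted for.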
1777     JSC::JSLockHolder lock(scriptExecutionContext()->vm());
1778     if (extraMemoryCostDelta > 0)
1779         scriptExecutionContext()->vm().heap.reportExtraMemoryCost(extraMemoryCostDelta);
1780 }
1781
1782 Vector<String> SourceBuffer::bufferedSamplesForTrackID(const AtomicString& trackID)
1783 {
1784     auto it = m_trackBufferMap.find(trackID);
1785     if (it == m_trackBufferMap.end())
1786         return Vector<String>();
1787
1788     TrackBuffer& trackBuffer = it->value;
1789     Vector<String> sampleDescriptions;
1790     for (auto& pair : trackBuffer.samples.decodeOrder())
1791         sampleDescriptions.append(toString(*pair.second));
1792
1793     return sampleDescriptions;
1794 }
1795
1796 Document& SourceBuffer::document() const
1797 {
1798     ASSERT(scriptExecutionContext());
1799     return downcast<Document>(*scriptExecutionContext());
1800 }
1801
1802 } // namespace WebCore
1803
1804 #endif