#import "AudioTrackPrivateMediaSourceAVFObjC.h"
#import "VideoTrackPrivateMediaSourceAVFObjC.h"
#import "InbandTextTrackPrivateAVFObjC.h"
-#import <AVFoundation/AVAssetTrack.h>
-#import <AVFoundation/AVSampleBufferDisplayLayer.h>
+#import <AVFoundation/AVFoundation.h>
#import <objc/runtime.h>
#import <wtf/text/AtomicString.h>
#import <wtf/text/CString.h>
#pragma mark -
#pragma mark AVSampleBufferAudioRenderer
-#if __MAC_OS_X_VERSION_MIN_REQUIRED <= 1090
+#if PLATFORM(IOS) || __MAC_OS_X_VERSION_MIN_REQUIRED <= 1090
@interface AVSampleBufferAudioRenderer : NSObject
- (NSInteger)status;
- (NSError*)error;
_parent->didParseStreamDataAsAsset(asset);
}
+- (void)streamDataParser:(AVStreamDataParser *)streamDataParser didParseStreamDataAsAsset:(AVAsset *)asset withDiscontinuity:(BOOL)discontinuity // Newer AVStreamDataParser delegate variant; discontinuity flag is intentionally ignored for now.
+{
+ UNUSED_PARAM(discontinuity);
+#if ASSERT_DISABLED
+ UNUSED_PARAM(streamDataParser); // Only referenced by the ASSERT below; silence unused-parameter warning in release builds.
+#endif
+ ASSERT(streamDataParser == _parser);
+ _parent->didParseStreamDataAsAsset(asset); // Forward to the owner, identical to the non-discontinuity delegate callback above.
+}
+
- (void)streamDataParser:(AVStreamDataParser *)streamDataParser didFailToParseStreamDataWithError:(NSError *)error
{
#if ASSERT_DISABLED
#pragma mark -
#pragma mark MediaSampleAVFObjC
-class MediaSampleAVFObjC FINAL : public MediaSample {
+class MediaSampleAVFObjC final : public MediaSample {
public:
static RefPtr<MediaSampleAVFObjC> create(CMSampleBufferRef sample, int trackID) { return adoptRef(new MediaSampleAVFObjC(sample, trackID)); }
virtual ~MediaSampleAVFObjC() { }
- virtual MediaTime presentationTime() const OVERRIDE { return toMediaTime(CMSampleBufferGetPresentationTimeStamp(m_sample.get())); }
- virtual MediaTime decodeTime() const OVERRIDE { return toMediaTime(CMSampleBufferGetDecodeTimeStamp(m_sample.get())); }
- virtual MediaTime duration() const OVERRIDE { return toMediaTime(CMSampleBufferGetDuration(m_sample.get())); }
- virtual AtomicString trackID() const OVERRIDE { return m_id; }
+ virtual MediaTime presentationTime() const override { return toMediaTime(CMSampleBufferGetPresentationTimeStamp(m_sample.get())); }
+ virtual MediaTime decodeTime() const override { return toMediaTime(CMSampleBufferGetDecodeTimeStamp(m_sample.get())); }
+ virtual MediaTime duration() const override { return toMediaTime(CMSampleBufferGetDuration(m_sample.get())); }
+ virtual AtomicString trackID() const override { return m_id; }
- virtual SampleFlags flags() const OVERRIDE;
- virtual PlatformSample platformSample() OVERRIDE;
+ virtual SampleFlags flags() const override;
+ virtual PlatformSample platformSample() override;
protected:
MediaSampleAVFObjC(CMSampleBufferRef sample, int trackID)
#pragma mark -
#pragma mark MediaDescriptionAVFObjC
-class MediaDescriptionAVFObjC FINAL : public MediaDescription {
+class MediaDescriptionAVFObjC final : public MediaDescription {
public:
static RefPtr<MediaDescriptionAVFObjC> create(AVAssetTrack* track) { return adoptRef(new MediaDescriptionAVFObjC(track)); }
virtual ~MediaDescriptionAVFObjC() { }
- virtual AtomicString codec() const OVERRIDE { return m_codec; }
- virtual bool isVideo() const OVERRIDE { return m_isVideo; }
- virtual bool isAudio() const OVERRIDE { return m_isAudio; }
- virtual bool isText() const OVERRIDE { return m_isText; }
+ virtual AtomicString codec() const override { return m_codec; }
+ virtual bool isVideo() const override { return m_isVideo; }
+ virtual bool isAudio() const override { return m_isAudio; }
+ virtual bool isText() const override { return m_isText; }
protected:
MediaDescriptionAVFObjC(AVAssetTrack* track)
m_asset = asset;
+ m_videoTracks.clear();
+ m_audioTracks.clear();
+
SourceBufferPrivateClient::InitializationSegment segment;
segment.duration = toMediaTime([m_asset duration]);
for (AVAssetTrack* track in [m_asset tracks]) {
if ([track hasMediaCharacteristic:AVMediaCharacteristicVisual]) {
SourceBufferPrivateClient::InitializationSegment::VideoTrackInformation info;
- info.track = VideoTrackPrivateMediaSourceAVFObjC::create(track, this);
+ RefPtr<VideoTrackPrivateMediaSourceAVFObjC> videoTrack = VideoTrackPrivateMediaSourceAVFObjC::create(track, this);
+ info.track = videoTrack;
+ m_videoTracks.append(videoTrack);
info.description = MediaDescriptionAVFObjC::create(track);
segment.videoTracks.append(info);
} else if ([track hasMediaCharacteristic:AVMediaCharacteristicAudible]) {
SourceBufferPrivateClient::InitializationSegment::AudioTrackInformation info;
- info.track = AudioTrackPrivateMediaSourceAVFObjC::create(track, this);
+ RefPtr<AudioTrackPrivateMediaSourceAVFObjC> audioTrack = AudioTrackPrivateMediaSourceAVFObjC::create(track, this);
+ info.track = audioTrack;
+ m_audioTracks.append(audioTrack);
info.description = MediaDescriptionAVFObjC::create(track);
segment.audioTracks.append(info);
}
// FIXME(125161): Add TextTrack support
}
+ if (!m_videoTracks.isEmpty())
+ m_mediaSource->player()->sizeChanged();
+
if (m_client)
m_client->sourceBufferPrivateDidReceiveInitializationSegment(this, segment);
}
const String& mediaType;
};
-static OSStatus callProcessCodedFrameForEachSample(CMSampleBufferRef sampleBuffer, CMItemCount, void *refcon)
-{
-    ProcessCodedFrameInfo* info = static_cast<ProcessCodedFrameInfo*>(refcon);
-    return info->sourceBuffer->processCodedFrame(info->trackID, sampleBuffer, info->mediaType) ? noErr : paramErr;
-}
-
void SourceBufferPrivateAVFObjC::didProvideMediaDataForTrackID(int trackID, CMSampleBufferRef sampleBuffer, const String& mediaType, unsigned flags)
{
UNUSED_PARAM(flags);
- ProcessCodedFrameInfo info = {this, trackID, mediaType};
- CMSampleBufferCallForEachSample(sampleBuffer, &callProcessCodedFrameForEachSample, &info);
+ processCodedFrame(trackID, sampleBuffer, mediaType); // CMSampleBufferCallForEachSample indirection removed; hand the whole buffer to processCodedFrame directly.
}
bool SourceBufferPrivateAVFObjC::processCodedFrame(int trackID, CMSampleBufferRef sampleBuffer, const String&)
m_client->sourceBufferPrivateSeekToTime(this, time);
}
+IntSize SourceBufferPrivateAVFObjC::naturalSize()
+{
+ for (auto& videoTrack : m_videoTracks) { // Iterate by reference: copying the RefPtr each pass churns the ref-count for no benefit.
+ if (videoTrack->selected())
+ return videoTrack->naturalSize(); // Report the size of the currently selected video track.
+ }
+
+ return IntSize(); // Empty size signals "no selected video track" to the caller.
+}
+
void SourceBufferPrivateAVFObjC::didBecomeReadyForMoreSamples(int trackID)
{
if (trackID == m_enabledVideoTrackID)