// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/ffmpeg_demuxer.h"
#include <algorithm>
#include <string>

#include "base/base64.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/sparse_histogram.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_byteorder.h"
#include "base/task_runner_util.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/limits.h"
#include "media/base/media_log.h"
#include "media/base/video_decoder_config.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"
#include "media/filters/ffmpeg_h264_to_annex_b_bitstream_converter.h"
#include "media/filters/webvtt_util.h"
#include "media/formats/webm/webm_crypto_helpers.h"

namespace media {

//
// FFmpegDemuxerStream
//
FFmpegDemuxerStream::FFmpegDemuxerStream(
    FFmpegDemuxer* demuxer,
    AVStream* stream)
    : demuxer_(demuxer),
      task_runner_(base::MessageLoopProxy::current()),
      stream_(stream),
      type_(UNKNOWN),
      end_of_stream_(false),
      last_packet_timestamp_(kNoTimestamp()),
      bitstream_converter_enabled_(false) {
  DCHECK(demuxer_);

  bool is_encrypted = false;

  // Determine our media format.
  switch (stream->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
      type_ = AUDIO;
      AVStreamToAudioDecoderConfig(stream, &audio_config_, true);
      is_encrypted = audio_config_.is_encrypted();
      break;
    case AVMEDIA_TYPE_VIDEO:
      type_ = VIDEO;
      AVStreamToVideoDecoderConfig(stream, &video_config_, true);
      is_encrypted = video_config_.is_encrypted();
      break;
    case AVMEDIA_TYPE_SUBTITLE:
      type_ = TEXT;
      break;
    default:
      NOTREACHED();
      break;
  }

  // Calculate the duration.
  duration_ = ConvertStreamTimestamp(stream->time_base, stream->duration);
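
  // Note: the converter below rewrites H.264 length-prefixed NAL units (as
  // stored in MP4-style containers) into Annex B start-code form, which is
  // what some downstream decoders expect.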
  if (stream_->codec->codec_id == AV_CODEC_ID_H264) {
    bitstream_converter_.reset(
        new FFmpegH264ToAnnexBBitstreamConverter(stream_->codec));
  }
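
  // For encrypted streams the WebM muxer stores the key ID base64-encoded
  // under the "enc_key_id" metadata entry; decode it and surface it to the
  // pipeline so a decryption key can be requested.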
  if (is_encrypted) {
    AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL,
                                         0);
    if (!key || !key->value)
      return;
    base::StringPiece base64_key_id(key->value);
    std::string enc_key_id;
    base::Base64Decode(base64_key_id, &enc_key_id);
    DCHECK(!enc_key_id.empty());
    if (enc_key_id.empty())
      return;

    encryption_key_id_.assign(enc_key_id);
    demuxer_->FireNeedKey(kWebMEncryptInitDataType, enc_key_id);
  }
}

void FFmpegDemuxerStream::EnqueuePacket(ScopedAVPacket packet) {
  DCHECK(task_runner_->BelongsToCurrentThread());

  if (!demuxer_ || end_of_stream_) {
    NOTREACHED() << "Attempted to enqueue packet on a stopped stream";
    return;
  }

  // Convert the packet if there is a bitstream filter.
  if (packet->data && bitstream_converter_enabled_ &&
      !bitstream_converter_->ConvertPacket(packet.get())) {
    LOG(ERROR) << "Format conversion failed.";
  }

  // Get side data if any. For now, the only type of side_data is VP8 Alpha. We
  // keep this generic so that other side_data types in the future can be
  // handled the same way as well.
  av_packet_split_side_data(packet.get());

  scoped_refptr<DecoderBuffer> buffer;

  if (type() == DemuxerStream::TEXT) {
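    // WebVTT cues arrive with their identifier and settings as packet side
    // data; pack both into the DecoderBuffer's side data alongside the cue
    // payload.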
    int id_size = 0;
    uint8* id_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_WEBVTT_IDENTIFIER,
        &id_size);

    int settings_size = 0;
    uint8* settings_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_WEBVTT_SETTINGS,
        &settings_size);

    std::vector<uint8> side_data;
    MakeSideData(id_data, id_data + id_size,
                 settings_data, settings_data + settings_size,
                 &side_data);

    buffer = DecoderBuffer::CopyFrom(packet.get()->data, packet.get()->size,
                                     side_data.data(), side_data.size());
  } else {
    int side_data_size = 0;
    uint8* side_data = av_packet_get_side_data(
        packet.get(),
        AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
        &side_data_size);

    scoped_ptr<DecryptConfig> decrypt_config;
    int data_offset = 0;
    if ((type() == DemuxerStream::AUDIO && audio_config_.is_encrypted()) ||
        (type() == DemuxerStream::VIDEO && video_config_.is_encrypted())) {
      if (!WebMCreateDecryptConfig(
              packet->data, packet->size,
              reinterpret_cast<const uint8*>(encryption_key_id_.data()),
              encryption_key_id_.size(),
              &decrypt_config,
              &data_offset)) {
        LOG(ERROR) << "Creation of DecryptConfig failed.";
      }
    }

    // If a packet is returned by FFmpeg's av_parser_parse2() the packet will
    // reference inner memory of FFmpeg. As such we should transfer the packet
    // into memory we control.
    if (side_data_size > 0) {
      buffer = DecoderBuffer::CopyFrom(packet.get()->data + data_offset,
                                       packet.get()->size - data_offset,
                                       side_data, side_data_size);
    } else {
      buffer = DecoderBuffer::CopyFrom(packet.get()->data + data_offset,
                                       packet.get()->size - data_offset);
    }

    int skip_samples_size = 0;
    uint8* skip_samples = av_packet_get_side_data(packet.get(),
                                                  AV_PKT_DATA_SKIP_SAMPLES,
                                                  &skip_samples_size);
    const int kSkipSamplesValidSize = 10;
    const int kSkipSamplesOffset = 4;
    if (skip_samples_size >= kSkipSamplesValidSize) {
      int discard_padding_samples = base::ByteSwapToLE32(
          *(reinterpret_cast<const uint32*>(skip_samples +
                                            kSkipSamplesOffset)));
      // TODO(vigneshv): Change decoder buffer to use number of samples so that
      // this conversion can be avoided.
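      // e.g. 312 padding samples at 48000 Hz convert to
      // 312 * 1000000 / 48000 = 6500 microseconds of discard padding.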
      buffer->set_discard_padding(base::TimeDelta::FromMicroseconds(
          discard_padding_samples * 1000000.0 /
          audio_decoder_config().samples_per_second()));
    }

    if (decrypt_config)
      buffer->set_decrypt_config(decrypt_config.Pass());
  }

  buffer->set_timestamp(ConvertStreamTimestamp(
      stream_->time_base, packet->pts));
  buffer->set_duration(ConvertStreamTimestamp(
      stream_->time_base, packet->duration));
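
  // Track the contiguous range between the previous and current packet
  // timestamps so the demuxer can report how much media is buffered.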
  if (buffer->timestamp() != kNoTimestamp() &&
      last_packet_timestamp_ != kNoTimestamp() &&
      last_packet_timestamp_ < buffer->timestamp()) {
    buffered_ranges_.Add(last_packet_timestamp_, buffer->timestamp());
    demuxer_->NotifyBufferingChanged();
  }

  last_packet_timestamp_ = buffer->timestamp();

  buffer_queue_.Push(buffer);
  SatisfyPendingRead();
}

void FFmpegDemuxerStream::SetEndOfStream() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  end_of_stream_ = true;
  SatisfyPendingRead();
}

void FFmpegDemuxerStream::FlushBuffers() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(read_cb_.is_null()) << "There should be no pending read";
  buffer_queue_.Clear();
  end_of_stream_ = false;
  last_packet_timestamp_ = kNoTimestamp();
}

void FFmpegDemuxerStream::Stop() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  buffer_queue_.Clear();
  if (!read_cb_.is_null()) {
    base::ResetAndReturn(&read_cb_).Run(
        DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
  }
  demuxer_ = NULL;
  stream_ = NULL;
  end_of_stream_ = true;
}

base::TimeDelta FFmpegDemuxerStream::duration() {
  return duration_;
}

DemuxerStream::Type FFmpegDemuxerStream::type() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  return type_;
}

void FFmpegDemuxerStream::Read(const ReadCB& read_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK(read_cb_.is_null()) << "Overlapping reads are not supported";
  read_cb_ = BindToCurrentLoop(read_cb);

  // Don't accept any additional reads if we've been told to stop.
  // The |demuxer_| may have been destroyed in the pipeline thread.
  //
  // TODO(scherkus): it would be cleaner to reply with an error message.
  if (!demuxer_) {
    base::ResetAndReturn(&read_cb_).Run(
        DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
    return;
  }

  SatisfyPendingRead();
}

void FFmpegDemuxerStream::EnableBitstreamConverter() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK(bitstream_converter_.get());
  bitstream_converter_enabled_ = true;
}

AudioDecoderConfig FFmpegDemuxerStream::audio_decoder_config() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK_EQ(type_, AUDIO);
  return audio_config_;
}

VideoDecoderConfig FFmpegDemuxerStream::video_decoder_config() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK_EQ(type_, VIDEO);
  return video_config_;
}

FFmpegDemuxerStream::~FFmpegDemuxerStream() {
  DCHECK(!demuxer_);
  DCHECK(read_cb_.is_null());
  DCHECK(buffer_queue_.IsEmpty());
}

base::TimeDelta FFmpegDemuxerStream::GetElapsedTime() const {
  return ConvertStreamTimestamp(stream_->time_base, stream_->cur_dts);
}

Ranges<base::TimeDelta> FFmpegDemuxerStream::GetBufferedRanges() const {
  return buffered_ranges_;
}

void FFmpegDemuxerStream::SatisfyPendingRead() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  if (!read_cb_.is_null()) {
    if (!buffer_queue_.IsEmpty()) {
      base::ResetAndReturn(&read_cb_).Run(
          DemuxerStream::kOk, buffer_queue_.Pop());
    } else if (end_of_stream_) {
      base::ResetAndReturn(&read_cb_).Run(
          DemuxerStream::kOk, DecoderBuffer::CreateEOSBuffer());
    }
  }

  // Have capacity? Ask for more!
  if (HasAvailableCapacity() && !end_of_stream_) {
    demuxer_->NotifyCapacityAvailable();
  }
}

bool FFmpegDemuxerStream::HasAvailableCapacity() {
  // TODO(scherkus): Remove early return and reenable time-based capacity
  // after our data sources support canceling/concurrent reads, see
  // http://crbug.com/165762 for details.
  return !read_cb_.is_null();

  // Try to have one second's worth of encoded data per stream.
  const base::TimeDelta kCapacity = base::TimeDelta::FromSeconds(1);
  return buffer_queue_.IsEmpty() || buffer_queue_.Duration() < kCapacity;
}

size_t FFmpegDemuxerStream::MemoryUsage() const {
  return buffer_queue_.data_size();
}

TextKind FFmpegDemuxerStream::GetTextKind() const {
  DCHECK_EQ(type_, DemuxerStream::TEXT);

  if (stream_->disposition & AV_DISPOSITION_CAPTIONS)
    return kTextCaptions;

  if (stream_->disposition & AV_DISPOSITION_DESCRIPTIONS)
    return kTextDescriptions;

  if (stream_->disposition & AV_DISPOSITION_METADATA)
    return kTextMetadata;

  return kTextSubtitles;
}

std::string FFmpegDemuxerStream::GetMetadata(const char* key) const {
  const AVDictionaryEntry* entry =
      av_dict_get(stream_->metadata, key, NULL, 0);
  return (entry == NULL || entry->value == NULL) ? "" : entry->value;
}

base::TimeDelta FFmpegDemuxerStream::ConvertStreamTimestamp(
    const AVRational& time_base, int64 timestamp) {
  if (timestamp == static_cast<int64>(AV_NOPTS_VALUE))
    return kNoTimestamp();
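
  // e.g. a |timestamp| of 9000 in a 1/90000 time base converts to 100 ms.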
  return ConvertFromTimeBase(time_base, timestamp);
}

//
// FFmpegDemuxer
//
FFmpegDemuxer::FFmpegDemuxer(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
    DataSource* data_source,
    const NeedKeyCB& need_key_cb,
    const scoped_refptr<MediaLog>& media_log)
    : host_(NULL),
      task_runner_(task_runner),
      weak_factory_(this),
      blocking_thread_("FFmpegDemuxer"),
      pending_read_(false),
      pending_seek_(false),
      data_source_(data_source),
      media_log_(media_log),
      bitrate_(0),
      start_time_(kNoTimestamp()),
      audio_disabled_(false),
      text_enabled_(false),
      duration_known_(false),
      need_key_cb_(need_key_cb) {
  DCHECK(task_runner_.get());
  DCHECK(data_source_);
}

FFmpegDemuxer::~FFmpegDemuxer() {}

void FFmpegDemuxer::Stop(const base::Closure& callback) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  url_protocol_->Abort();
  data_source_->Stop(BindToCurrentLoop(base::Bind(
      &FFmpegDemuxer::OnDataSourceStopped, weak_this_,
      BindToCurrentLoop(callback))));
  data_source_ = NULL;
}

void FFmpegDemuxer::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK(!pending_seek_);

  // TODO(scherkus): Inspect |pending_read_| and cancel IO via |blocking_url_|,
  // otherwise we can end up waiting for a pre-seek read to complete even
  // though we know we're going to drop it on the floor.

  // Always seek to a timestamp less than or equal to the desired timestamp.
  int flags = AVSEEK_FLAG_BACKWARD;

  // Passing -1 as our stream index lets FFmpeg pick a default stream. FFmpeg
  // will attempt to use the lowest-index video stream, if present, followed by
  // the lowest-index audio stream.
  pending_seek_ = true;
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&av_seek_frame,
                 glue_->format_context(),
                 -1,
                 time.InMicroseconds(),
                 flags),
      base::Bind(&FFmpegDemuxer::OnSeekFrameDone, weak_this_, cb));
}

void FFmpegDemuxer::OnAudioRendererDisabled() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  audio_disabled_ = true;
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter && (*iter)->type() == DemuxerStream::AUDIO) {
      (*iter)->Stop();
    }
  }
}

void FFmpegDemuxer::Initialize(DemuxerHost* host,
                               const PipelineStatusCB& status_cb,
                               bool enable_text_tracks) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  host_ = host;
  weak_this_ = weak_factory_.GetWeakPtr();
  text_enabled_ = enable_text_tracks;

  // TODO(scherkus): DataSource should have a host by this point,
  // see http://crbug.com/122071
  data_source_->set_host(host);

  url_protocol_.reset(new BlockingUrlProtocol(data_source_, BindToCurrentLoop(
      base::Bind(&FFmpegDemuxer::OnDataSourceError, base::Unretained(this)))));
  glue_.reset(new FFmpegGlue(url_protocol_.get()));
  AVFormatContext* format_context = glue_->format_context();

  // Disable ID3v1 tag reading to avoid costly seeks to end of file for data we
  // don't use. FFmpeg will only read ID3v1 tags if no other metadata is
  // available, so add a metadata entry to ensure some is always present.
  av_dict_set(&format_context->metadata, "skip_id3v1_tags", "", 0);

  // Open the AVFormatContext using our glue layer.
  CHECK(blocking_thread_.Start());
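  // OpenContext() performs blocking file I/O, so it runs on
  // |blocking_thread_|; the reply is posted back to this thread via
  // OnOpenContextDone().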
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&FFmpegGlue::OpenContext, base::Unretained(glue_.get())),
      base::Bind(&FFmpegDemuxer::OnOpenContextDone, weak_this_, status_cb));
}

DemuxerStream* FFmpegDemuxer::GetStream(DemuxerStream::Type type) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  return GetFFmpegStream(type);
}

FFmpegDemuxerStream* FFmpegDemuxer::GetFFmpegStream(
    DemuxerStream::Type type) const {
  StreamVector::const_iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter && (*iter)->type() == type) {
      return *iter;
    }
  }
  return NULL;
}

base::TimeDelta FFmpegDemuxer::GetStartTime() const {
  DCHECK(task_runner_->BelongsToCurrentThread());
  return start_time_;
}

void FFmpegDemuxer::AddTextStreams() {
  DCHECK(task_runner_->BelongsToCurrentThread());

  for (StreamVector::size_type idx = 0; idx < streams_.size(); ++idx) {
    FFmpegDemuxerStream* stream = streams_[idx];
    if (stream == NULL || stream->type() != DemuxerStream::TEXT)
      continue;

    TextKind kind = stream->GetTextKind();
    std::string title = stream->GetMetadata("title");
    std::string language = stream->GetMetadata("language");

    // TODO: Implement "id" metadata in FFMPEG.
    // See: http://crbug.com/323183
    host_->AddTextStream(stream, TextTrackConfig(kind, title, language,
        std::string()));
  }
}

// Helper for calculating the bitrate of the media based on information stored
// in |format_context| or failing that the size and duration of the media.
//
// Returns 0 if a bitrate could not be determined.
static int CalculateBitrate(
    AVFormatContext* format_context,
    const base::TimeDelta& duration,
    int64 filesize_in_bytes) {
  // If there is a bitrate set on the container, use it.
  if (format_context->bit_rate > 0)
    return format_context->bit_rate;

  // Then try to sum the bitrates individually per stream.
  int bitrate = 0;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVCodecContext* codec_context = format_context->streams[i]->codec;
    bitrate += codec_context->bit_rate;
  }
  if (bitrate > 0)
    return bitrate;

  // See if we can approximate the bitrate as long as we have a filesize and
  // valid duration.
  if (duration.InMicroseconds() <= 0 ||
      duration == kInfiniteDuration() ||
      filesize_in_bytes == 0) {
    return 0;
  }

  // Do math in floating point as we'd overflow an int64 if the filesize was
  // larger than ~1073GB.
  double bytes = filesize_in_bytes;
  double duration_us = duration.InMicroseconds();
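  // e.g. a 10,000,000 byte file with a 100 second duration works out to
  // 10e6 * 8e6 / 1e8 = 800,000 bits per second.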
  return bytes * 8000000.0 / duration_us;
}

void FFmpegDemuxer::OnOpenContextDone(const PipelineStatusCB& status_cb,
                                      bool result) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  if (!blocking_thread_.IsRunning()) {
    status_cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (!result) {
    status_cb.Run(DEMUXER_ERROR_COULD_NOT_OPEN);
    return;
  }

  // Fully initialize AVFormatContext by parsing the stream a little.
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&avformat_find_stream_info,
                 glue_->format_context(),
                 static_cast<AVDictionary**>(NULL)),
      base::Bind(&FFmpegDemuxer::OnFindStreamInfoDone, weak_this_, status_cb));
}

void FFmpegDemuxer::OnFindStreamInfoDone(const PipelineStatusCB& status_cb,
                                         int result) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  if (!blocking_thread_.IsRunning() || !data_source_) {
    status_cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (result < 0) {
    status_cb.Run(DEMUXER_ERROR_COULD_NOT_PARSE);
    return;
  }

  // Create demuxer stream entries for each possible AVStream. Each stream
  // is examined to determine if it is supported or not (is the codec enabled
  // for it in this release?). Unsupported streams are skipped, allowing for
  // partial playback. At least one audio or video stream must be playable.
  AVFormatContext* format_context = glue_->format_context();
  streams_.resize(format_context->nb_streams);

  AVStream* audio_stream = NULL;
  AudioDecoderConfig audio_config;

  AVStream* video_stream = NULL;
  VideoDecoderConfig video_config;

  base::TimeDelta max_duration;
  for (size_t i = 0; i < format_context->nb_streams; ++i) {
    AVStream* stream = format_context->streams[i];
    AVCodecContext* codec_context = stream->codec;
    AVMediaType codec_type = codec_context->codec_type;

    if (codec_type == AVMEDIA_TYPE_AUDIO) {
      if (audio_stream)
        continue;

      // Log the codec detected, whether it is supported or not.
      UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedAudioCodec",
                                  codec_context->codec_id);
      // Ensure the codec is supported. IsValidConfig() also checks that the
      // channel layout and sample format are valid.
      AVStreamToAudioDecoderConfig(stream, &audio_config, false);
      if (!audio_config.IsValidConfig())
        continue;
      audio_stream = stream;
    } else if (codec_type == AVMEDIA_TYPE_VIDEO) {
      if (video_stream)
        continue;

      // Log the codec detected, whether it is supported or not.
      UMA_HISTOGRAM_SPARSE_SLOWLY("Media.DetectedVideoCodec",
                                  codec_context->codec_id);
      // Ensure the codec is supported. IsValidConfig() also checks that the
      // frame size and visible size are valid.
      AVStreamToVideoDecoderConfig(stream, &video_config, false);
      if (!video_config.IsValidConfig())
        continue;
      video_stream = stream;
    } else if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
      if (codec_context->codec_id != AV_CODEC_ID_WEBVTT || !text_enabled_) {
        continue;
      }
    } else {
      continue;
    }

    streams_[i] = new FFmpegDemuxerStream(this, stream);
    max_duration = std::max(max_duration, streams_[i]->duration());

    if (stream->first_dts != static_cast<int64_t>(AV_NOPTS_VALUE)) {
      const base::TimeDelta first_dts = ConvertFromTimeBase(
          stream->time_base, stream->first_dts);
      if (start_time_ == kNoTimestamp() || first_dts < start_time_)
        start_time_ = first_dts;
    }
  }

  if (!audio_stream && !video_stream) {
    status_cb.Run(DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
    return;
  }

  if (text_enabled_)
    AddTextStreams();

  if (format_context->duration != static_cast<int64_t>(AV_NOPTS_VALUE)) {
    // If there is a duration value in the container use that to find the
    // maximum between it and the duration from A/V streams.
    const AVRational av_time_base = {1, AV_TIME_BASE};
    max_duration =
        std::max(max_duration,
                 ConvertFromTimeBase(av_time_base, format_context->duration));
  } else {
    // The duration is unknown, in which case this is likely a live stream.
    max_duration = kInfiniteDuration();
  }

  // Some demuxers, like WAV, do not put timestamps on their frames. We
  // assume the start time is 0.
  if (start_time_ == kNoTimestamp())
    start_time_ = base::TimeDelta();

  // MPEG-4 B-frames cause grief for a simple container like AVI. Enable PTS
  // generation so we always get timestamps, see http://crbug.com/169570
  if (strcmp(format_context->iformat->name, "avi") == 0)
    format_context->flags |= AVFMT_FLAG_GENPTS;

  // Good to go: set the duration and bitrate and notify we're done
  // initializing.
  host_->SetDuration(max_duration);
  duration_known_ = (max_duration != kInfiniteDuration());

  int64 filesize_in_bytes = 0;
  url_protocol_->GetSize(&filesize_in_bytes);
  bitrate_ = CalculateBitrate(format_context, max_duration, filesize_in_bytes);
  if (bitrate_ > 0)
    data_source_->SetBitrate(bitrate_);

  if (audio_stream) {
    AVCodecContext* audio_codec = audio_stream->codec;
    media_log_->SetBooleanProperty("found_audio_stream", true);

    SampleFormat sample_format = audio_config.sample_format();
    std::string sample_name = SampleFormatToString(sample_format);

    media_log_->SetStringProperty("audio_sample_format", sample_name);

    AVCodec* codec = avcodec_find_decoder(audio_codec->codec_id);
    if (codec) {
      media_log_->SetStringProperty("audio_codec_name", codec->name);
    }

    media_log_->SetIntegerProperty("audio_channels_count",
                                   audio_codec->channels);
    media_log_->SetIntegerProperty("audio_samples_per_second",
                                   audio_config.samples_per_second());
  } else {
    media_log_->SetBooleanProperty("found_audio_stream", false);
  }

  if (video_stream) {
    AVCodecContext* video_codec = video_stream->codec;
    media_log_->SetBooleanProperty("found_video_stream", true);

    AVCodec* codec = avcodec_find_decoder(video_codec->codec_id);
    if (codec) {
      media_log_->SetStringProperty("video_codec_name", codec->name);
    }

    media_log_->SetIntegerProperty("width", video_codec->width);
    media_log_->SetIntegerProperty("height", video_codec->height);
    media_log_->SetIntegerProperty("coded_width",
                                   video_codec->coded_width);
    media_log_->SetIntegerProperty("coded_height",
                                   video_codec->coded_height);
    media_log_->SetStringProperty(
        "time_base",
        base::StringPrintf("%d/%d",
                           video_codec->time_base.num,
                           video_codec->time_base.den));
    media_log_->SetStringProperty(
        "video_format", VideoFrame::FormatToString(video_config.format()));
    media_log_->SetBooleanProperty("video_is_encrypted",
                                   video_config.is_encrypted());
  } else {
    media_log_->SetBooleanProperty("found_video_stream", false);
  }

  media_log_->SetDoubleProperty("max_duration", max_duration.InSecondsF());
  media_log_->SetDoubleProperty("start_time", start_time_.InSecondsF());
  media_log_->SetIntegerProperty("bitrate", bitrate_);

  status_cb.Run(PIPELINE_OK);
}

void FFmpegDemuxer::OnSeekFrameDone(const PipelineStatusCB& cb, int result) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  CHECK(pending_seek_);
  pending_seek_ = false;

  if (!blocking_thread_.IsRunning()) {
    cb.Run(PIPELINE_ERROR_ABORT);
    return;
  }

  if (result < 0) {
    // Use VLOG(1) instead of NOTIMPLEMENTED() to prevent the message being
    // captured from stdout and contaminating testing.
    // TODO(scherkus): Implement this properly and signal error (BUG=23447).
    VLOG(1) << "Not implemented";
  }

  // Tell streams to flush buffers due to seeking.
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter)
      (*iter)->FlushBuffers();
  }

  // Resume reading until capacity.
  ReadFrameIfNeeded();

  // Notify we're finished seeking.
  cb.Run(PIPELINE_OK);
}

void FFmpegDemuxer::ReadFrameIfNeeded() {
  DCHECK(task_runner_->BelongsToCurrentThread());

  // Make sure we have work to do before reading.
  if (!blocking_thread_.IsRunning() || !StreamsHaveAvailableCapacity() ||
      pending_read_ || pending_seek_) {
    return;
  }

  // Allocate and read an AVPacket from the media. Save |packet_ptr| since
  // evaluation order of packet.get() and base::Passed(&packet) is
  // undefined.
  ScopedAVPacket packet(new AVPacket());
  AVPacket* packet_ptr = packet.get();

  pending_read_ = true;
  base::PostTaskAndReplyWithResult(
      blocking_thread_.message_loop_proxy().get(),
      FROM_HERE,
      base::Bind(&av_read_frame, glue_->format_context(), packet_ptr),
      base::Bind(
          &FFmpegDemuxer::OnReadFrameDone, weak_this_, base::Passed(&packet)));
}

void FFmpegDemuxer::OnReadFrameDone(ScopedAVPacket packet, int result) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(pending_read_);
  pending_read_ = false;

  if (!blocking_thread_.IsRunning() || pending_seek_) {
    return;
  }

  // Consider the stream as ended if:
  // - either underlying ffmpeg returned an error
  // - or FFMpegDemuxer reached the maximum allowed memory usage.
  if (result < 0 || IsMaxMemoryUsageReached()) {
    // Update the duration based on the highest elapsed time across all streams
    // if it was previously unknown.
    if (!duration_known_) {
      base::TimeDelta max_duration;

      for (StreamVector::iterator iter = streams_.begin();
           iter != streams_.end();
           ++iter) {
        if (!*iter)
          continue;

        base::TimeDelta duration = (*iter)->GetElapsedTime();
        if (duration != kNoTimestamp() && duration > max_duration)
          max_duration = duration;
      }

      if (max_duration > base::TimeDelta()) {
        host_->SetDuration(max_duration);
        duration_known_ = true;
      }
    }

    // If we have reached the end of stream, tell the downstream filters about
    // the event.
    StreamHasEnded();
    return;
  }

  // Queue the packet with the appropriate stream.
  DCHECK_GE(packet->stream_index, 0);
  DCHECK_LT(packet->stream_index, static_cast<int>(streams_.size()));

  // Defend against ffmpeg giving us a bad stream index.
  if (packet->stream_index >= 0 &&
      packet->stream_index < static_cast<int>(streams_.size()) &&
      streams_[packet->stream_index] &&
      (!audio_disabled_ ||
       streams_[packet->stream_index]->type() != DemuxerStream::AUDIO)) {

    // TODO(scherkus): Fix demuxing upstream to never return packets w/o data
    // when av_read_frame() returns success code. See bug comment for ideas:
    //
    // https://code.google.com/p/chromium/issues/detail?id=169133#c10
    if (!packet->data) {
      ScopedAVPacket new_packet(new AVPacket());
      av_new_packet(new_packet.get(), 0);
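
      // Carry the timing and position metadata over to the empty packet so
      // downstream code still sees a correctly timestamped packet.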
      new_packet->pts = packet->pts;
      new_packet->dts = packet->dts;
      new_packet->pos = packet->pos;
      new_packet->duration = packet->duration;
      new_packet->convergence_duration = packet->convergence_duration;
      new_packet->flags = packet->flags;
      new_packet->stream_index = packet->stream_index;

      packet.swap(new_packet);
    }

    // Special case for opus in ogg. FFmpeg is pre-trimming the codec delay
    // from the packet timestamp. Chrome expects to handle this itself inside
    // the decoder, so shift timestamps by the delay in this case.
    // TODO(dalecurtis): Try to get fixed upstream. See http://crbug.com/328207
    if (strcmp(glue_->format_context()->iformat->name, "ogg") == 0) {
      const AVCodecContext* codec_context =
          glue_->format_context()->streams[packet->stream_index]->codec;
      if (codec_context->codec_id == AV_CODEC_ID_OPUS &&
          codec_context->delay > 0) {
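        // |delay| is the Opus pre-skip expressed in samples at 48 kHz, which
        // matches the Ogg Opus stream time base, so it can be added to |pts|
        // directly.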
        packet->pts += codec_context->delay;
      }
    }

    FFmpegDemuxerStream* demuxer_stream = streams_[packet->stream_index];
    demuxer_stream->EnqueuePacket(packet.Pass());
  }

  // Keep reading until we've reached capacity.
  ReadFrameIfNeeded();
}

void FFmpegDemuxer::OnDataSourceStopped(const base::Closure& callback) {
  // This will block until all tasks complete. Note that after this returns it's
  // possible for reply tasks (e.g., OnReadFrameDone()) to be queued on this
  // thread. Each of the reply task methods must check whether we've stopped the
  // thread and drop their results on the floor.
  DCHECK(task_runner_->BelongsToCurrentThread());
  blocking_thread_.Stop();

  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter)
      (*iter)->Stop();
  }

  callback.Run();
}

bool FFmpegDemuxer::StreamsHaveAvailableCapacity() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (*iter && (*iter)->HasAvailableCapacity()) {
      return true;
    }
  }
  return false;
}

bool FFmpegDemuxer::IsMaxMemoryUsageReached() const {
  DCHECK(task_runner_->BelongsToCurrentThread());

  // Max allowed memory usage, all streams combined.
  const size_t kDemuxerMemoryLimit = 150 * 1024 * 1024;

  size_t memory_left = kDemuxerMemoryLimit;
  for (StreamVector::const_iterator iter = streams_.begin();
       iter != streams_.end(); ++iter) {
    if (!(*iter))
      continue;

    size_t stream_memory_usage = (*iter)->MemoryUsage();
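    // Compare against the remaining budget before subtracting so the unsigned
    // arithmetic below can never underflow.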
    if (stream_memory_usage > memory_left)
      return true;

    memory_left -= stream_memory_usage;
  }
  return false;
}

void FFmpegDemuxer::StreamHasEnded() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  StreamVector::iterator iter;
  for (iter = streams_.begin(); iter != streams_.end(); ++iter) {
    if (!*iter ||
        (audio_disabled_ && (*iter)->type() == DemuxerStream::AUDIO)) {
      continue;
    }
    (*iter)->SetEndOfStream();
  }
}

void FFmpegDemuxer::FireNeedKey(const std::string& init_data_type,
                                const std::string& encryption_key_id) {
  std::vector<uint8> key_id_local(encryption_key_id.begin(),
                                  encryption_key_id.end());
  need_key_cb_.Run(init_data_type, key_id_local);
}

void FFmpegDemuxer::NotifyCapacityAvailable() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  ReadFrameIfNeeded();
}

void FFmpegDemuxer::NotifyBufferingChanged() {
  DCHECK(task_runner_->BelongsToCurrentThread());
  Ranges<base::TimeDelta> buffered;
  FFmpegDemuxerStream* audio =
      audio_disabled_ ? NULL : GetFFmpegStream(DemuxerStream::AUDIO);
  FFmpegDemuxerStream* video = GetFFmpegStream(DemuxerStream::VIDEO);
  if (audio && video) {
    buffered = audio->GetBufferedRanges().IntersectionWith(
        video->GetBufferedRanges());
  } else if (audio) {
    buffered = audio->GetBufferedRanges();
  } else if (video) {
    buffered = video->GetBufferedRanges();
  }
  for (size_t i = 0; i < buffered.size(); ++i)
    host_->AddBufferedTimeRange(buffered.start(i), buffered.end(i));
}

void FFmpegDemuxer::OnDataSourceError() {
  host_->OnDemuxerError(PIPELINE_ERROR_READ);
}

}  // namespace media