Add explicit |forceOnlineSignin| to user pod status
[chromium-blink-merge.git] / media / filters / ffmpeg_video_decoder.cc
blobff14e89d3711196a713a3e562ee2d975fae758d9
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "media/filters/ffmpeg_video_decoder.h"
7 #include <algorithm>
8 #include <string>
10 #include "base/bind.h"
11 #include "base/callback_helpers.h"
12 #include "base/command_line.h"
13 #include "base/location.h"
14 #include "base/single_thread_task_runner.h"
15 #include "base/strings/string_number_conversions.h"
16 #include "media/base/bind_to_current_loop.h"
17 #include "media/base/decoder_buffer.h"
18 #include "media/base/limits.h"
19 #include "media/base/media_switches.h"
20 #include "media/base/pipeline.h"
21 #include "media/base/video_decoder_config.h"
22 #include "media/base/video_frame.h"
23 #include "media/base/video_util.h"
24 #include "media/ffmpeg/ffmpeg_common.h"
25 #include "media/filters/ffmpeg_glue.h"
27 namespace media {
// Always try to use two threads for video decoding.  There is little reason
// not to since current day CPUs tend to be multi-core and we measured
// performance benefits on older machines such as P4s with hyperthreading.
//
// Handling decoding on separate threads also frees up the pipeline thread to
// continue processing. Although it'd be nice to have the option of a single
// decoding thread, FFmpeg treats having one thread the same as having zero
// threads (i.e., avcodec_decode_video() will execute on the calling thread).
// Yet another reason for having two threads :)
static const int kDecodeThreads = 2;

// Upper bound applied to the --video-threads switch in GetThreadCount().
static const int kMaxDecodeThreads = 16;
41 // Returns the number of threads given the FFmpeg CodecID. Also inspects the
42 // command line for a valid --video-threads flag.
43 static int GetThreadCount(AVCodecID codec_id) {
44 // Refer to http://crbug.com/93932 for tsan suppressions on decoding.
45 int decode_threads = kDecodeThreads;
47 const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
48 std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
49 if (threads.empty() || !base::StringToInt(threads, &decode_threads))
50 return decode_threads;
52 decode_threads = std::max(decode_threads, 0);
53 decode_threads = std::min(decode_threads, kMaxDecodeThreads);
54 return decode_threads;
// Constructs the decoder in the kUninitialized state.  All subsequent calls
// must be made on |task_runner|.
FFmpegVideoDecoder::FFmpegVideoDecoder(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
    : task_runner_(task_runner),
      weak_factory_(this),
      state_(kUninitialized) {
}
64 int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
65 AVFrame* frame) {
66 // Don't use |codec_context_| here! With threaded decoding,
67 // it will contain unsynchronized width/height/pix_fmt values,
68 // whereas |codec_context| contains the current threads's
69 // updated width/height/pix_fmt, which can change for adaptive
70 // content.
71 VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
72 if (format == VideoFrame::UNKNOWN)
73 return AVERROR(EINVAL);
74 DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
75 format == VideoFrame::YV12J);
77 gfx::Size size(codec_context->width, codec_context->height);
78 int ret;
79 if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0)
80 return ret;
82 gfx::Size natural_size;
83 if (codec_context->sample_aspect_ratio.num > 0) {
84 natural_size = GetNaturalSize(size,
85 codec_context->sample_aspect_ratio.num,
86 codec_context->sample_aspect_ratio.den);
87 } else {
88 natural_size = config_.natural_size();
91 if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size))
92 return AVERROR(EINVAL);
94 scoped_refptr<VideoFrame> video_frame =
95 frame_pool_.CreateFrame(format, size, gfx::Rect(size),
96 natural_size, kNoTimestamp());
98 for (int i = 0; i < 3; i++) {
99 frame->base[i] = video_frame->data(i);
100 frame->data[i] = video_frame->data(i);
101 frame->linesize[i] = video_frame->stride(i);
104 frame->opaque = NULL;
105 video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
106 frame->type = FF_BUFFER_TYPE_USER;
107 frame->width = codec_context->width;
108 frame->height = codec_context->height;
109 frame->format = codec_context->pix_fmt;
111 return 0;
114 static int GetVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
115 FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
116 return decoder->GetVideoBuffer(s, frame);
119 static void ReleaseVideoBufferImpl(AVCodecContext* s, AVFrame* frame) {
120 scoped_refptr<VideoFrame> video_frame;
121 video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
123 // The FFmpeg API expects us to zero the data pointers in
124 // this callback
125 memset(frame->data, 0, sizeof(frame->data));
126 frame->opaque = NULL;
129 void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
130 const PipelineStatusCB& status_cb) {
131 DCHECK(task_runner_->BelongsToCurrentThread());
132 DCHECK(decode_cb_.is_null());
133 DCHECK(reset_cb_.is_null());
134 DCHECK(!config.is_encrypted());
136 FFmpegGlue::InitializeFFmpeg();
137 weak_this_ = weak_factory_.GetWeakPtr();
139 config_ = config;
140 PipelineStatusCB initialize_cb = BindToCurrentLoop(status_cb);
142 if (!config.IsValidConfig() || !ConfigureDecoder()) {
143 initialize_cb.Run(DECODER_ERROR_NOT_SUPPORTED);
144 return;
147 // Success!
148 state_ = kNormal;
149 initialize_cb.Run(PIPELINE_OK);
152 void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
153 const DecodeCB& decode_cb) {
154 DCHECK(task_runner_->BelongsToCurrentThread());
155 DCHECK(!decode_cb.is_null());
156 CHECK_NE(state_, kUninitialized);
157 CHECK(decode_cb_.is_null()) << "Overlapping decodes are not supported.";
158 decode_cb_ = BindToCurrentLoop(decode_cb);
160 if (state_ == kError) {
161 base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL);
162 return;
165 // Return empty frames if decoding has finished.
166 if (state_ == kDecodeFinished) {
167 base::ResetAndReturn(&decode_cb_).Run(kOk, VideoFrame::CreateEOSFrame());
168 return;
171 DecodeBuffer(buffer);
174 void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
175 DCHECK(task_runner_->BelongsToCurrentThread());
176 DCHECK(reset_cb_.is_null());
177 reset_cb_ = BindToCurrentLoop(closure);
179 // Defer the reset if a decode is pending.
180 if (!decode_cb_.is_null())
181 return;
183 DoReset();
186 void FFmpegVideoDecoder::DoReset() {
187 DCHECK(decode_cb_.is_null());
189 avcodec_flush_buffers(codec_context_.get());
190 state_ = kNormal;
191 base::ResetAndReturn(&reset_cb_).Run();
194 void FFmpegVideoDecoder::Stop(const base::Closure& closure) {
195 DCHECK(task_runner_->BelongsToCurrentThread());
196 base::ScopedClosureRunner runner(BindToCurrentLoop(closure));
198 if (state_ == kUninitialized)
199 return;
201 if (!decode_cb_.is_null()) {
202 base::ResetAndReturn(&decode_cb_).Run(kAborted, NULL);
203 // Reset is pending only when decode is pending.
204 if (!reset_cb_.is_null())
205 base::ResetAndReturn(&reset_cb_).Run();
208 ReleaseFFmpegResources();
209 state_ = kUninitialized;
// The decoder must have been stopped (state returned to kUninitialized and
// FFmpeg resources released) before destruction.
FFmpegVideoDecoder::~FFmpegVideoDecoder() {
  DCHECK_EQ(kUninitialized, state_);
  DCHECK(!codec_context_);
  DCHECK(!av_frame_);
}
218 void FFmpegVideoDecoder::DecodeBuffer(
219 const scoped_refptr<DecoderBuffer>& buffer) {
220 DCHECK(task_runner_->BelongsToCurrentThread());
221 DCHECK_NE(state_, kUninitialized);
222 DCHECK_NE(state_, kDecodeFinished);
223 DCHECK_NE(state_, kError);
224 DCHECK(reset_cb_.is_null());
225 DCHECK(!decode_cb_.is_null());
226 DCHECK(buffer);
228 // During decode, because reads are issued asynchronously, it is possible to
229 // receive multiple end of stream buffers since each decode is acked. When the
230 // first end of stream buffer is read, FFmpeg may still have frames queued
231 // up in the decoder so we need to go through the decode loop until it stops
232 // giving sensible data. After that, the decoder should output empty
233 // frames. There are three states the decoder can be in:
235 // kNormal: This is the starting state. Buffers are decoded. Decode errors
236 // are discarded.
237 // kFlushCodec: There isn't any more input data. Call avcodec_decode_video2
238 // until no more data is returned to flush out remaining
239 // frames. The input buffer is ignored at this point.
240 // kDecodeFinished: All calls return empty frames.
241 // kError: Unexpected error happened.
243 // These are the possible state transitions.
245 // kNormal -> kFlushCodec:
246 // When buffer->end_of_stream() is first true.
247 // kNormal -> kError:
248 // A decoding error occurs and decoding needs to stop.
249 // kFlushCodec -> kDecodeFinished:
250 // When avcodec_decode_video2() returns 0 data.
251 // kFlushCodec -> kError:
252 // When avcodec_decode_video2() errors out.
253 // (any state) -> kNormal:
254 // Any time Reset() is called.
256 // Transition to kFlushCodec on the first end of stream buffer.
257 if (state_ == kNormal && buffer->end_of_stream()) {
258 state_ = kFlushCodec;
261 scoped_refptr<VideoFrame> video_frame;
262 if (!FFmpegDecode(buffer, &video_frame)) {
263 state_ = kError;
264 base::ResetAndReturn(&decode_cb_).Run(kDecodeError, NULL);
265 return;
268 if (!video_frame.get()) {
269 if (state_ == kFlushCodec) {
270 DCHECK(buffer->end_of_stream());
271 state_ = kDecodeFinished;
272 base::ResetAndReturn(&decode_cb_)
273 .Run(kOk, VideoFrame::CreateEOSFrame());
274 return;
277 base::ResetAndReturn(&decode_cb_).Run(kNotEnoughData, NULL);
278 return;
281 base::ResetAndReturn(&decode_cb_).Run(kOk, video_frame);
284 bool FFmpegVideoDecoder::FFmpegDecode(
285 const scoped_refptr<DecoderBuffer>& buffer,
286 scoped_refptr<VideoFrame>* video_frame) {
287 DCHECK(video_frame);
289 // Reset frame to default values.
290 avcodec_get_frame_defaults(av_frame_.get());
292 // Create a packet for input data.
293 // Due to FFmpeg API changes we no longer have const read-only pointers.
294 AVPacket packet;
295 av_init_packet(&packet);
296 if (buffer->end_of_stream()) {
297 packet.data = NULL;
298 packet.size = 0;
299 } else {
300 packet.data = const_cast<uint8*>(buffer->data());
301 packet.size = buffer->data_size();
303 // Let FFmpeg handle presentation timestamp reordering.
304 codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
306 // This is for codecs not using get_buffer to initialize
307 // |av_frame_->reordered_opaque|
308 av_frame_->reordered_opaque = codec_context_->reordered_opaque;
311 int frame_decoded = 0;
312 int result = avcodec_decode_video2(codec_context_.get(),
313 av_frame_.get(),
314 &frame_decoded,
315 &packet);
316 // Log the problem if we can't decode a video frame and exit early.
317 if (result < 0) {
318 LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
319 *video_frame = NULL;
320 return false;
323 // If no frame was produced then signal that more data is required to
324 // produce more frames. This can happen under two circumstances:
325 // 1) Decoder was recently initialized/flushed
326 // 2) End of stream was reached and all internal frames have been output
327 if (frame_decoded == 0) {
328 *video_frame = NULL;
329 return true;
332 // TODO(fbarchard): Work around for FFmpeg http://crbug.com/27675
333 // The decoder is in a bad state and not decoding correctly.
334 // Checking for NULL avoids a crash in CopyPlane().
335 if (!av_frame_->data[VideoFrame::kYPlane] ||
336 !av_frame_->data[VideoFrame::kUPlane] ||
337 !av_frame_->data[VideoFrame::kVPlane]) {
338 LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
339 *video_frame = NULL;
340 return false;
343 if (!av_frame_->opaque) {
344 LOG(ERROR) << "VideoFrame object associated with frame data not set.";
345 return false;
347 *video_frame = static_cast<VideoFrame*>(av_frame_->opaque);
349 (*video_frame)->SetTimestamp(
350 base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
352 return true;
// Frees the codec context and scratch frame.  Safe to call when either is
// already NULL; the scoped members presumably invoke the FFmpeg-specific free
// routines via their deleters (declared in the header — confirm there).
void FFmpegVideoDecoder::ReleaseFFmpegResources() {
  codec_context_.reset();
  av_frame_.reset();
}
360 bool FFmpegVideoDecoder::ConfigureDecoder() {
361 // Release existing decoder resources if necessary.
362 ReleaseFFmpegResources();
364 // Initialize AVCodecContext structure.
365 codec_context_.reset(avcodec_alloc_context3(NULL));
366 VideoDecoderConfigToAVCodecContext(config_, codec_context_.get());
368 // Enable motion vector search (potentially slow), strong deblocking filter
369 // for damaged macroblocks, and set our error detection sensitivity.
370 codec_context_->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
371 codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
372 codec_context_->opaque = this;
373 codec_context_->flags |= CODEC_FLAG_EMU_EDGE;
374 codec_context_->get_buffer = GetVideoBufferImpl;
375 codec_context_->release_buffer = ReleaseVideoBufferImpl;
377 AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
378 if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
379 ReleaseFFmpegResources();
380 return false;
383 av_frame_.reset(av_frame_alloc());
384 return true;
387 } // namespace media