media/filters/ffmpeg_video_decoder.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/ffmpeg_video_decoder.h"

#include <algorithm>
#include <string>

#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/bind_to_current_loop.h"
#include "media/base/decoder_buffer.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
#include "media/base/pipeline.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/filters/ffmpeg_glue.h"

namespace media {

// Always try to use two threads for video decoding. There is little reason
// not to since current day CPUs tend to be multi-core and we measured
// performance benefits on older machines such as P4s with hyperthreading.
//
// Handling decoding on separate threads also frees up the pipeline thread to
// continue processing. Although it'd be nice to have the option of a single
// decoding thread, FFmpeg treats having one thread the same as having zero
// threads (i.e., avcodec_decode_video() will execute on the calling thread).
// Yet another reason for having two threads :)
static const int kDecodeThreads = 2;
static const int kMaxDecodeThreads = 16;

// Returns the number of threads given the FFmpeg CodecID. Also inspects the
// command line for a valid --video-threads flag.
static int GetThreadCount(AVCodecID codec_id) {
  // Refer to http://crbug.com/93932 for tsan suppressions on decoding.
  int decode_threads = kDecodeThreads;

  const base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess();
  std::string threads(cmd_line->GetSwitchValueASCII(switches::kVideoThreads));
  if (threads.empty() || !base::StringToInt(threads, &decode_threads))
    return decode_threads;

  decode_threads = std::max(decode_threads, 0);
  decode_threads = std::min(decode_threads, kMaxDecodeThreads);
  return decode_threads;
}

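// Example: launching with --video-threads=4 makes GetThreadCount() return 4;
// parsed values are clamped to [0, kMaxDecodeThreads].

// Trampoline for AVCodecContext::get_buffer2. FFmpeg invokes this C callback
// whenever the decoder needs a frame buffer; |s->opaque| holds the
// FFmpegVideoDecoder instance installed in ConfigureDecoder().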
static int GetVideoBufferImpl(struct AVCodecContext* s,
                              AVFrame* frame,
                              int flags) {
  FFmpegVideoDecoder* decoder = static_cast<FFmpegVideoDecoder*>(s->opaque);
  return decoder->GetVideoBuffer(s, frame, flags);
}

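// Called when FFmpeg drops the last reference to a buffer created in
// GetVideoBuffer(). Swapping |opaque| into a scoped_refptr and letting it go
// out of scope releases the VideoFrame reference taken at allocation time.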
static void ReleaseVideoBufferImpl(void* opaque, uint8* data) {
  scoped_refptr<VideoFrame> video_frame;
  video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
}

// static
bool FFmpegVideoDecoder::IsCodecSupported(VideoCodec codec) {
  FFmpegGlue::InitializeFFmpeg();
  return avcodec_find_decoder(VideoCodecToCodecID(codec)) != nullptr;
}

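// A minimal caller sketch (hypothetical; real callers are wired up by the
// pipeline's decoder selection code):
//
//   if (FFmpegVideoDecoder::IsCodecSupported(config.codec()))
//     decoder->Initialize(config, false /* low_delay */, init_cb, output_cb);
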
FFmpegVideoDecoder::FFmpegVideoDecoder(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
    : task_runner_(task_runner), state_(kUninitialized),
      decode_nalus_(false) {}

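// Implements the AVCodecContext::get_buffer2 contract: instead of letting
// FFmpeg allocate its own memory, decoding writes directly into pool-backed
// VideoFrames, so output frames reach the pipeline without an extra copy.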
int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
                                       AVFrame* frame,
                                       int flags) {
  // Don't use |codec_context_| here! With threaded decoding,
  // it will contain unsynchronized width/height/pix_fmt values,
  // whereas |codec_context| contains the current thread's
  // updated width/height/pix_fmt, which can change for adaptive
  // content.
  const VideoPixelFormat format =
      AVPixelFormatToVideoPixelFormat(codec_context->pix_fmt);

  if (format == PIXEL_FORMAT_UNKNOWN)
    return AVERROR(EINVAL);
  DCHECK(format == PIXEL_FORMAT_YV12 || format == PIXEL_FORMAT_YV16 ||
         format == PIXEL_FORMAT_YV24);

  gfx::Size size(codec_context->width, codec_context->height);
  const int ret = av_image_check_size(size.width(), size.height(), 0, NULL);
  if (ret < 0)
    return ret;

  gfx::Size natural_size;
  if (codec_context->sample_aspect_ratio.num > 0) {
    natural_size = GetNaturalSize(size,
                                  codec_context->sample_aspect_ratio.num,
                                  codec_context->sample_aspect_ratio.den);
  } else {
    natural_size = config_.natural_size();
  }

  // FFmpeg has specific requirements on the allocation size of the frame. The
  // following logic replicates FFmpeg's allocation strategy to ensure buffers
  // are not overread / overwritten. See ff_init_buffer_info() for details.
  //
  // When lowres is non-zero, dimensions should be divided by 2^(lowres), but
  // since we don't use this, just DCHECK that it's zero.
  DCHECK_EQ(codec_context->lowres, 0);
  gfx::Size coded_size(std::max(size.width(), codec_context->coded_width),
                       std::max(size.height(), codec_context->coded_height));

  if (!VideoFrame::IsValidConfig(format, VideoFrame::STORAGE_UNKNOWN,
                                 coded_size, gfx::Rect(size), natural_size)) {
    return AVERROR(EINVAL);
  }

  // FFmpeg expects the initial allocation to be zero-initialized. Failure to
  // do so can lead to uninitialized value usage. See http://crbug.com/390941
  scoped_refptr<VideoFrame> video_frame = frame_pool_.CreateFrame(
      format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp());

  // Prefer the color space from the codec context. If it's not specified (or
  // is set to an unsupported value), fall back on the value from the config.
  ColorSpace color_space = AVColorSpaceToColorSpace(codec_context->colorspace,
                                                    codec_context->color_range);
  if (color_space == COLOR_SPACE_UNSPECIFIED)
    color_space = config_.color_space();
  video_frame->metadata()->SetInteger(VideoFrameMetadata::COLOR_SPACE,
                                      color_space);

  for (size_t i = 0; i < VideoFrame::NumPlanes(video_frame->format()); i++) {
    frame->data[i] = video_frame->data(i);
    frame->linesize[i] = video_frame->stride(i);
  }

  frame->width = coded_size.width();
  frame->height = coded_size.height();
  frame->format = codec_context->pix_fmt;
  frame->reordered_opaque = codec_context->reordered_opaque;

  // Now create an AVBufferRef for the data just allocated. It will own the
  // reference to the VideoFrame object.
  void* opaque = NULL;
  video_frame.swap(reinterpret_cast<VideoFrame**>(&opaque));
  frame->buf[0] =
      av_buffer_create(frame->data[0],
                       VideoFrame::AllocationSize(format, coded_size),
                       ReleaseVideoBufferImpl,
                       opaque,
                       0);
  return 0;
}

std::string FFmpegVideoDecoder::GetDisplayName() const {
  return "FFmpegVideoDecoder";
}

void FFmpegVideoDecoder::Initialize(const VideoDecoderConfig& config,
                                    bool low_delay,
                                    const InitCB& init_cb,
                                    const OutputCB& output_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(!config.is_encrypted());
  DCHECK(!output_cb.is_null());

  FFmpegGlue::InitializeFFmpeg();

  config_ = config;
  InitCB bound_init_cb = BindToCurrentLoop(init_cb);

  if (!config.IsValidConfig() || !ConfigureDecoder(low_delay)) {
    bound_init_cb.Run(false);
    return;
  }

  output_cb_ = BindToCurrentLoop(output_cb);

  // Success!
  state_ = kNormal;
  bound_init_cb.Run(true);
}

void FFmpegVideoDecoder::Decode(const scoped_refptr<DecoderBuffer>& buffer,
                                const DecodeCB& decode_cb) {
  DCHECK(task_runner_->BelongsToCurrentThread());
  DCHECK(buffer.get());
  DCHECK(!decode_cb.is_null());
  CHECK_NE(state_, kUninitialized);

  DecodeCB decode_cb_bound = BindToCurrentLoop(decode_cb);

  if (state_ == kError) {
    decode_cb_bound.Run(kDecodeError);
    return;
  }

  if (state_ == kDecodeFinished) {
    decode_cb_bound.Run(kOk);
    return;
  }

  DCHECK_EQ(state_, kNormal);

  // During decode, because reads are issued asynchronously, it is possible to
  // receive multiple end of stream buffers since each decode is acked. When
  // the first end of stream buffer is read, FFmpeg may still have frames
  // queued up in the decoder so we need to go through the decode loop until
  // it stops giving sensible data. After that, the decoder should output
  // empty frames. There are three states the decoder can be in:
  //
  //   kNormal: This is the starting state. Buffers are decoded. Decode errors
  //            are discarded.
  //   kDecodeFinished: All calls return empty frames.
  //   kError: Unexpected error happened.
  //
  // These are the possible state transitions.
  //
  // kNormal -> kDecodeFinished:
  //     When EOS buffer is received and the codec has been flushed.
  // kNormal -> kError:
  //     A decoding error occurs and decoding needs to stop.
  // (any state) -> kNormal:
  //     Any time Reset() is called.

  bool has_produced_frame;
  do {
    has_produced_frame = false;
    if (!FFmpegDecode(buffer, &has_produced_frame)) {
      state_ = kError;
      decode_cb_bound.Run(kDecodeError);
      return;
    }
    // Repeat to flush the decoder after receiving EOS buffer.
  } while (buffer->end_of_stream() && has_produced_frame);

  if (buffer->end_of_stream())
    state_ = kDecodeFinished;

  // VideoDecoderShim expects that |decode_cb| is called only after
  // |output_cb_|.
  decode_cb_bound.Run(kOk);
}

void FFmpegVideoDecoder::Reset(const base::Closure& closure) {
  DCHECK(task_runner_->BelongsToCurrentThread());

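  // Discard any frames still queued inside FFmpeg; per the state diagram in
  // Decode(), Reset() returns the decoder to kNormal from any state.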
  avcodec_flush_buffers(codec_context_.get());
  state_ = kNormal;
  task_runner_->PostTask(FROM_HERE, closure);
}

FFmpegVideoDecoder::~FFmpegVideoDecoder() {
  DCHECK(task_runner_->BelongsToCurrentThread());

  if (state_ != kUninitialized)
    ReleaseFFmpegResources();
}

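// Returns false on a fatal decoding error; otherwise returns true and sets
// |*has_produced_frame| if a frame was delivered through |output_cb_|.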
bool FFmpegVideoDecoder::FFmpegDecode(
    const scoped_refptr<DecoderBuffer>& buffer,
    bool* has_produced_frame) {
  DCHECK(!*has_produced_frame);

  // Create a packet for input data.
  // Due to FFmpeg API changes we no longer have const read-only pointers.
  AVPacket packet;
  av_init_packet(&packet);
  if (buffer->end_of_stream()) {
    packet.data = NULL;
    packet.size = 0;
  } else {
    packet.data = const_cast<uint8*>(buffer->data());
    packet.size = buffer->data_size();

    // Let FFmpeg handle presentation timestamp reordering.
    codec_context_->reordered_opaque = buffer->timestamp().InMicroseconds();
  }

  int frame_decoded = 0;
  int result = avcodec_decode_video2(codec_context_.get(),
                                     av_frame_.get(),
                                     &frame_decoded,
                                     &packet);
  // Log the problem if we can't decode a video frame and exit early.
  if (result < 0) {
    LOG(ERROR) << "Error decoding video: " << buffer->AsHumanReadableString();
    return false;
  }

  // FFmpeg says some codecs might have multiple frames per packet. Previous
  // discussions with rbultje@ indicate this shouldn't be true for the codecs
  // we use.
  DCHECK_EQ(result, packet.size);

  // If no frame was produced then signal that more data is required to
  // produce more frames. This can happen under two circumstances:
  //   1) Decoder was recently initialized/flushed
  //   2) End of stream was reached and all internal frames have been output
  if (frame_decoded == 0) {
    return true;
  }

  // TODO(fbarchard): Workaround for FFmpeg http://crbug.com/27675
  // The decoder is in a bad state and not decoding correctly.
  // Checking for NULL avoids a crash in CopyPlane().
  if (!av_frame_->data[VideoFrame::kYPlane] ||
      !av_frame_->data[VideoFrame::kUPlane] ||
      !av_frame_->data[VideoFrame::kVPlane]) {
    LOG(ERROR) << "Video frame was produced yet has invalid frame data.";
    av_frame_unref(av_frame_.get());
    return false;
  }

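  // Recover the VideoFrame that GetVideoBuffer() stashed in this AVFrame's
  // AVBufferRef; |reordered_opaque| carries the presentation timestamp back
  // out of FFmpeg's frame reordering.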
  scoped_refptr<VideoFrame> frame =
      reinterpret_cast<VideoFrame*>(av_buffer_get_opaque(av_frame_->buf[0]));
  frame->set_timestamp(
      base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque));
  *has_produced_frame = true;
  output_cb_.Run(frame);

  av_frame_unref(av_frame_.get());
  return true;
}

void FFmpegVideoDecoder::ReleaseFFmpegResources() {
  codec_context_.reset();
  av_frame_.reset();
}

bool FFmpegVideoDecoder::ConfigureDecoder(bool low_delay) {
  // Release existing decoder resources if necessary.
  ReleaseFFmpegResources();

  // Initialize AVCodecContext structure.
  codec_context_.reset(avcodec_alloc_context3(NULL));
  VideoDecoderConfigToAVCodecContext(config_, codec_context_.get());

  codec_context_->thread_count = GetThreadCount(codec_context_->codec_id);
  codec_context_->thread_type = low_delay ? FF_THREAD_SLICE : FF_THREAD_FRAME;
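  // Route frame allocation through GetVideoBuffer() above: |opaque| carries
  // |this| into the C callback, and refcounted frames keep each VideoFrame
  // alive until FFmpeg releases its AVBufferRef.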
  codec_context_->opaque = this;
  codec_context_->flags |= CODEC_FLAG_EMU_EDGE;
  codec_context_->get_buffer2 = GetVideoBufferImpl;
  codec_context_->refcounted_frames = 1;

  if (decode_nalus_)
    codec_context_->flags2 |= CODEC_FLAG2_CHUNKS;

  AVCodec* codec = avcodec_find_decoder(codec_context_->codec_id);
  if (!codec || avcodec_open2(codec_context_.get(), codec, NULL) < 0) {
    ReleaseFFmpegResources();
    return false;
  }

  av_frame_.reset(av_frame_alloc());
  return true;
}

}  // namespace media