// media/ffmpeg/ffmpeg_common.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/ffmpeg/ffmpeg_common.h"

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_util.h"

namespace media {

// Why FF_INPUT_BUFFER_PADDING_SIZE? FFmpeg assumes all input buffers are
// padded. Check here to ensure FFmpeg only receives data padded to its
// specifications.
static_assert(DecoderBuffer::kPaddingSize >= FF_INPUT_BUFFER_PADDING_SIZE,
              "DecoderBuffer padding size does not fit ffmpeg requirement");

// Alignment requirement by FFmpeg for input and output buffers. This needs to
// be updated to match FFmpeg when it changes.
#if defined(ARCH_CPU_ARM_FAMILY)
static const int kFFmpegBufferAddressAlignment = 16;
#else
static const int kFFmpegBufferAddressAlignment = 32;
#endif

// Check here to ensure FFmpeg only receives data aligned to its specifications.
static_assert(
    DecoderBuffer::kAlignmentSize >= kFFmpegBufferAddressAlignment &&
        DecoderBuffer::kAlignmentSize % kFFmpegBufferAddressAlignment == 0,
    "DecoderBuffer alignment size does not fit ffmpeg requirement");

// Allows faster SIMD YUV convert. Also, FFmpeg overreads/-writes occasionally.
// See video_get_buffer() in libavcodec/utils.c.
static const int kFFmpegOutputBufferPaddingSize = 16;

static_assert(VideoFrame::kFrameSizePadding >= kFFmpegOutputBufferPaddingSize,
              "VideoFrame padding size does not fit ffmpeg requirement");

static_assert(
    VideoFrame::kFrameAddressAlignment >= kFFmpegBufferAddressAlignment &&
        VideoFrame::kFrameAddressAlignment % kFFmpegBufferAddressAlignment == 0,
    "VideoFrame frame address alignment does not fit ffmpeg requirement");

static const AVRational kMicrosBase = { 1, base::Time::kMicrosecondsPerSecond };

base::TimeDelta ConvertFromTimeBase(const AVRational& time_base,
                                    int64 timestamp) {
  int64 microseconds = av_rescale_q(timestamp, time_base, kMicrosBase);
  return base::TimeDelta::FromMicroseconds(microseconds);
}

int64 ConvertToTimeBase(const AVRational& time_base,
                        const base::TimeDelta& timestamp) {
  return av_rescale_q(timestamp.InMicroseconds(), kMicrosBase, time_base);
}
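
// Illustrative note (not in the original source): these helpers simply
// rescale between an arbitrary AVRational time base and microseconds. For
// example, with the common 90 kHz MPEG time base {1, 90000}, a timestamp of
// 90000 ticks corresponds to one second:
//   ConvertFromTimeBase({1, 90000}, 90000)
//       == base::TimeDelta::FromMicroseconds(1000000)
//   ConvertToTimeBase({1, 90000}, base::TimeDelta::FromSeconds(1)) == 90000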

// Converts an FFmpeg audio codec ID into its corresponding supported codec id.
static AudioCodec CodecIDToAudioCodec(AVCodecID codec_id) {
  switch (codec_id) {
    case AV_CODEC_ID_AAC:
      return kCodecAAC;
    case AV_CODEC_ID_MP3:
      return kCodecMP3;
    case AV_CODEC_ID_VORBIS:
      return kCodecVorbis;
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_PCM_F32LE:
      return kCodecPCM;
    case AV_CODEC_ID_PCM_S16BE:
      return kCodecPCM_S16BE;
    case AV_CODEC_ID_PCM_S24BE:
      return kCodecPCM_S24BE;
    case AV_CODEC_ID_FLAC:
      return kCodecFLAC;
    case AV_CODEC_ID_AMR_NB:
      return kCodecAMR_NB;
    case AV_CODEC_ID_AMR_WB:
      return kCodecAMR_WB;
    case AV_CODEC_ID_GSM_MS:
      return kCodecGSM_MS;
    case AV_CODEC_ID_PCM_ALAW:
      return kCodecPCM_ALAW;
    case AV_CODEC_ID_PCM_MULAW:
      return kCodecPCM_MULAW;
    case AV_CODEC_ID_OPUS:
      return kCodecOpus;
    case AV_CODEC_ID_ALAC:
      return kCodecALAC;
    default:
      DVLOG(1) << "Unknown audio CodecID: " << codec_id;
  }
  return kUnknownAudioCodec;
}

static AVCodecID AudioCodecToCodecID(AudioCodec audio_codec,
                                     SampleFormat sample_format) {
  switch (audio_codec) {
    case kCodecAAC:
      return AV_CODEC_ID_AAC;
    case kCodecALAC:
      return AV_CODEC_ID_ALAC;
    case kCodecMP3:
      return AV_CODEC_ID_MP3;
    case kCodecPCM:
      switch (sample_format) {
        case kSampleFormatU8:
          return AV_CODEC_ID_PCM_U8;
        case kSampleFormatS16:
          return AV_CODEC_ID_PCM_S16LE;
        case kSampleFormatS32:
          return AV_CODEC_ID_PCM_S24LE;
        case kSampleFormatF32:
          return AV_CODEC_ID_PCM_F32LE;
        default:
          DVLOG(1) << "Unsupported sample format: " << sample_format;
      }
      break;
    case kCodecPCM_S16BE:
      return AV_CODEC_ID_PCM_S16BE;
    case kCodecPCM_S24BE:
      return AV_CODEC_ID_PCM_S24BE;
    case kCodecVorbis:
      return AV_CODEC_ID_VORBIS;
    case kCodecFLAC:
      return AV_CODEC_ID_FLAC;
    case kCodecAMR_NB:
      return AV_CODEC_ID_AMR_NB;
    case kCodecAMR_WB:
      return AV_CODEC_ID_AMR_WB;
    case kCodecGSM_MS:
      return AV_CODEC_ID_GSM_MS;
    case kCodecPCM_ALAW:
      return AV_CODEC_ID_PCM_ALAW;
    case kCodecPCM_MULAW:
      return AV_CODEC_ID_PCM_MULAW;
    case kCodecOpus:
      return AV_CODEC_ID_OPUS;
    default:
      DVLOG(1) << "Unknown AudioCodec: " << audio_codec;
  }
  return AV_CODEC_ID_NONE;
}

// Converts an FFmpeg video codec ID into its corresponding supported codec id.
static VideoCodec CodecIDToVideoCodec(AVCodecID codec_id) {
  switch (codec_id) {
    case AV_CODEC_ID_H264:
      return kCodecH264;
#if defined(ENABLE_HEVC_DEMUXING)
    case AV_CODEC_ID_HEVC:
      return kCodecHEVC;
#endif
    case AV_CODEC_ID_THEORA:
      return kCodecTheora;
    case AV_CODEC_ID_MPEG4:
      return kCodecMPEG4;
    case AV_CODEC_ID_VP8:
      return kCodecVP8;
    case AV_CODEC_ID_VP9:
      return kCodecVP9;
    default:
      DVLOG(1) << "Unknown video CodecID: " << codec_id;
  }
  return kUnknownVideoCodec;
}

AVCodecID VideoCodecToCodecID(VideoCodec video_codec) {
  switch (video_codec) {
    case kCodecH264:
      return AV_CODEC_ID_H264;
#if defined(ENABLE_HEVC_DEMUXING)
    case kCodecHEVC:
      return AV_CODEC_ID_HEVC;
#endif
    case kCodecTheora:
      return AV_CODEC_ID_THEORA;
    case kCodecMPEG4:
      return AV_CODEC_ID_MPEG4;
    case kCodecVP8:
      return AV_CODEC_ID_VP8;
    case kCodecVP9:
      return AV_CODEC_ID_VP9;
    default:
      DVLOG(1) << "Unknown VideoCodec: " << video_codec;
  }
  return AV_CODEC_ID_NONE;
}

static VideoCodecProfile ProfileIDToVideoCodecProfile(int profile) {
  // Clear out the CONSTRAINED & INTRA flags which are strict subsets of the
  // corresponding profiles with which they're used.
  profile &= ~FF_PROFILE_H264_CONSTRAINED;
  profile &= ~FF_PROFILE_H264_INTRA;
  switch (profile) {
    case FF_PROFILE_H264_BASELINE:
      return H264PROFILE_BASELINE;
    case FF_PROFILE_H264_MAIN:
      return H264PROFILE_MAIN;
    case FF_PROFILE_H264_EXTENDED:
      return H264PROFILE_EXTENDED;
    case FF_PROFILE_H264_HIGH:
      return H264PROFILE_HIGH;
    case FF_PROFILE_H264_HIGH_10:
      return H264PROFILE_HIGH10PROFILE;
    case FF_PROFILE_H264_HIGH_422:
      return H264PROFILE_HIGH422PROFILE;
    case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
      return H264PROFILE_HIGH444PREDICTIVEPROFILE;
    default:
      DVLOG(1) << "Unknown profile id: " << profile;
  }
  return VIDEO_CODEC_PROFILE_UNKNOWN;
}
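
// Illustrative note (not in the original source): FFmpeg reports
// "constrained" and "intra" H.264 profiles as the base profile value with an
// extra flag bit OR'd in, e.g. FF_PROFILE_H264_CONSTRAINED_BASELINE is
// FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED. Masking the flags
// off above therefore maps a constrained baseline stream to
// H264PROFILE_BASELINE.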

static int VideoCodecProfileToProfileID(VideoCodecProfile profile) {
  switch (profile) {
    case H264PROFILE_BASELINE:
      return FF_PROFILE_H264_BASELINE;
    case H264PROFILE_MAIN:
      return FF_PROFILE_H264_MAIN;
    case H264PROFILE_EXTENDED:
      return FF_PROFILE_H264_EXTENDED;
    case H264PROFILE_HIGH:
      return FF_PROFILE_H264_HIGH;
    case H264PROFILE_HIGH10PROFILE:
      return FF_PROFILE_H264_HIGH_10;
    case H264PROFILE_HIGH422PROFILE:
      return FF_PROFILE_H264_HIGH_422;
    case H264PROFILE_HIGH444PREDICTIVEPROFILE:
      return FF_PROFILE_H264_HIGH_444_PREDICTIVE;
    default:
      DVLOG(1) << "Unknown VideoCodecProfile: " << profile;
  }
  return FF_PROFILE_UNKNOWN;
}

SampleFormat AVSampleFormatToSampleFormat(AVSampleFormat sample_format) {
  switch (sample_format) {
    case AV_SAMPLE_FMT_U8:
      return kSampleFormatU8;
    case AV_SAMPLE_FMT_S16:
      return kSampleFormatS16;
    case AV_SAMPLE_FMT_S32:
      return kSampleFormatS32;
    case AV_SAMPLE_FMT_FLT:
      return kSampleFormatF32;
    case AV_SAMPLE_FMT_S16P:
      return kSampleFormatPlanarS16;
    case AV_SAMPLE_FMT_S32P:
      return kSampleFormatPlanarS32;
    case AV_SAMPLE_FMT_FLTP:
      return kSampleFormatPlanarF32;
    default:
      DVLOG(1) << "Unknown AVSampleFormat: " << sample_format;
  }
  return kUnknownSampleFormat;
}

static AVSampleFormat SampleFormatToAVSampleFormat(SampleFormat sample_format) {
  switch (sample_format) {
    case kSampleFormatU8:
      return AV_SAMPLE_FMT_U8;
    case kSampleFormatS16:
      return AV_SAMPLE_FMT_S16;
    case kSampleFormatS32:
      return AV_SAMPLE_FMT_S32;
    case kSampleFormatF32:
      return AV_SAMPLE_FMT_FLT;
    case kSampleFormatPlanarS16:
      return AV_SAMPLE_FMT_S16P;
    case kSampleFormatPlanarF32:
      return AV_SAMPLE_FMT_FLTP;
    default:
      DVLOG(1) << "Unknown SampleFormat: " << sample_format;
  }
  return AV_SAMPLE_FMT_NONE;
}

void AVCodecContextToAudioDecoderConfig(const AVCodecContext* codec_context,
                                        bool is_encrypted,
                                        AudioDecoderConfig* config) {
  DCHECK_EQ(codec_context->codec_type, AVMEDIA_TYPE_AUDIO);

  AudioCodec codec = CodecIDToAudioCodec(codec_context->codec_id);

  SampleFormat sample_format =
      AVSampleFormatToSampleFormat(codec_context->sample_fmt);

  ChannelLayout channel_layout = ChannelLayoutToChromeChannelLayout(
      codec_context->channel_layout, codec_context->channels);

  int sample_rate = codec_context->sample_rate;
  if (codec == kCodecOpus) {
    // |codec_context->sample_fmt| is not set by FFmpeg because Opus decoding
    // is not enabled in FFmpeg. It doesn't matter what value is set here, so
    // long as it's valid; the true sample format is selected inside the
    // decoder.
    sample_format = kSampleFormatF32;

    // Always use 48kHz for OPUS. Technically we should match to the highest
    // supported hardware sample rate among [8, 12, 16, 24, 48] kHz, but we
    // don't know the hardware sample rate at this point and those rates are
    // rarely used for output. See the "Input Sample Rate" section of the spec:
    // http://tools.ietf.org/html/draft-terriberry-oggopus-01#page-11
    sample_rate = 48000;
  }

  base::TimeDelta seek_preroll;
  if (codec_context->seek_preroll > 0) {
    seek_preroll = base::TimeDelta::FromMicroseconds(
        codec_context->seek_preroll * 1000000.0 / codec_context->sample_rate);
  }
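
  // Illustrative note (not in the original source): |seek_preroll| arrives in
  // sample units, so the expression above converts it to wall-clock time at
  // the stream's sample rate. For example, the 3840-sample preroll typically
  // signalled for Opus at 48000 Hz works out to
  // 3840 * 1000000 / 48000 = 80000 microseconds (80 ms).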

  config->Initialize(codec,
                     sample_format,
                     channel_layout,
                     sample_rate,
                     codec_context->extradata,
                     codec_context->extradata_size,
                     is_encrypted,
                     seek_preroll,
                     codec_context->delay);

  if (codec != kCodecOpus) {
    DCHECK_EQ(av_get_bytes_per_sample(codec_context->sample_fmt) * 8,
              config->bits_per_channel());
  }
}

void AVStreamToAudioDecoderConfig(const AVStream* stream,
                                  AudioDecoderConfig* config) {
  bool is_encrypted = false;
  AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL, 0);
  if (key)
    is_encrypted = true;
  AVCodecContextToAudioDecoderConfig(stream->codec, is_encrypted, config);
}

void AudioDecoderConfigToAVCodecContext(const AudioDecoderConfig& config,
                                        AVCodecContext* codec_context) {
  codec_context->codec_type = AVMEDIA_TYPE_AUDIO;
  codec_context->codec_id = AudioCodecToCodecID(config.codec(),
                                                config.sample_format());
  codec_context->sample_fmt = SampleFormatToAVSampleFormat(
      config.sample_format());

  // TODO(scherkus): should we set |channel_layout|? I'm not sure if FFmpeg uses
  // said information to decode.
  codec_context->channels =
      ChannelLayoutToChannelCount(config.channel_layout());
  codec_context->sample_rate = config.samples_per_second();
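
  // Illustrative note (not in the original source): FFmpeg expects |extradata|
  // to be allocated with av_malloc() and followed by
  // FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes, since its bitstream readers may
  // read past the reported size. The copy below (and the matching one in
  // VideoDecoderConfigToAVCodecContext()) follows that convention.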
  if (config.extra_data()) {
    codec_context->extradata_size = config.extra_data_size();
    codec_context->extradata = reinterpret_cast<uint8_t*>(
        av_malloc(config.extra_data_size() + FF_INPUT_BUFFER_PADDING_SIZE));
    memcpy(codec_context->extradata, config.extra_data(),
           config.extra_data_size());
    memset(codec_context->extradata + config.extra_data_size(), '\0',
           FF_INPUT_BUFFER_PADDING_SIZE);
  } else {
    codec_context->extradata = NULL;
    codec_context->extradata_size = 0;
  }
}

void AVStreamToVideoDecoderConfig(const AVStream* stream,
                                  VideoDecoderConfig* config) {
  gfx::Size coded_size(stream->codec->coded_width, stream->codec->coded_height);

  // TODO(vrk): This assumes decoded frame data starts at (0, 0), which is true
  // for now, but may not always be true forever. Fix this in the future.
  gfx::Rect visible_rect(stream->codec->width, stream->codec->height);

  AVRational aspect_ratio = { 1, 1 };
  if (stream->sample_aspect_ratio.num)
    aspect_ratio = stream->sample_aspect_ratio;
  else if (stream->codec->sample_aspect_ratio.num)
    aspect_ratio = stream->codec->sample_aspect_ratio;

  VideoCodec codec = CodecIDToVideoCodec(stream->codec->codec_id);

  VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
  if (codec == kCodecVP8)
    profile = VP8PROFILE_ANY;
  else if (codec == kCodecVP9)
    profile = VP9PROFILE_ANY;
  else
    profile = ProfileIDToVideoCodecProfile(stream->codec->profile);

  // Without the FFmpeg h264 decoder, AVFormat is unable to get the profile, so
  // default to baseline and let the VDA fail later if it doesn't support the
  // real profile. This is alright because if the FFmpeg h264 decoder isn't
  // enabled, there is no fallback if the VDA fails.
#if defined(DISABLE_FFMPEG_VIDEO_DECODERS)
  if (codec == kCodecH264)
    profile = H264PROFILE_BASELINE;
#endif

  gfx::Size natural_size = GetNaturalSize(
      visible_rect.size(), aspect_ratio.num, aspect_ratio.den);
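
  // Illustrative note (not in the original source): |natural_size| scales the
  // visible size by the sample (pixel) aspect ratio so anamorphic content
  // displays correctly. For example, a 1440x1080 stream with a 4:3 pixel
  // aspect ratio yields a natural size of 1920x1080.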

  VideoPixelFormat format =
      AVPixelFormatToVideoPixelFormat(stream->codec->pix_fmt);
  // The format and coded size may be unknown if FFmpeg is compiled without
  // video decoders.
#if defined(DISABLE_FFMPEG_VIDEO_DECODERS)
  if (format == PIXEL_FORMAT_UNKNOWN)
    format = PIXEL_FORMAT_YV12;
  if (coded_size == gfx::Size(0, 0))
    coded_size = visible_rect.size();
#endif

  if (codec == kCodecVP9) {
    // TODO(tomfinegan): libavcodec doesn't know about VP9.
    format = PIXEL_FORMAT_YV12;
    coded_size = visible_rect.size();
  }

  // Pad out |coded_size| for subsampled YUV formats.
  if (format != PIXEL_FORMAT_YV24) {
    coded_size.set_width((coded_size.width() + 1) / 2 * 2);
    if (format != PIXEL_FORMAT_YV16)
      coded_size.set_height((coded_size.height() + 1) / 2 * 2);
  }
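
  // Illustrative note (not in the original source): the rounding above makes
  // each dimension even wherever the corresponding chroma plane is subsampled.
  // For example, a 1921x1081 YV12 (4:2:0) frame is padded to 1922x1082, a
  // 1921x1081 YV16 (4:2:2) frame only has its width padded (1922x1081), and
  // YV24 (4:4:4) frames are left untouched.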

  bool is_encrypted = false;
  AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL, 0);
  if (key)
    is_encrypted = true;

  AVDictionaryEntry* webm_alpha =
      av_dict_get(stream->metadata, "alpha_mode", NULL, 0);
  if (webm_alpha && !strcmp(webm_alpha->value, "1")) {
    format = PIXEL_FORMAT_YV12A;
  }

  // Prefer the color space found by libavcodec if available.
  ColorSpace color_space = AVColorSpaceToColorSpace(stream->codec->colorspace,
                                                    stream->codec->color_range);
  if (color_space == COLOR_SPACE_UNSPECIFIED) {
    // Otherwise, assume that SD video is usually Rec.601, and HD is usually
    // Rec.709.
    color_space = (natural_size.height() < 720) ? COLOR_SPACE_SD_REC601
                                                : COLOR_SPACE_HD_REC709;
  }

  config->Initialize(codec, profile, format, color_space, coded_size,
                     visible_rect, natural_size, stream->codec->extradata,
                     stream->codec->extradata_size, is_encrypted);
}

void VideoDecoderConfigToAVCodecContext(
    const VideoDecoderConfig& config,
    AVCodecContext* codec_context) {
  codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
  codec_context->codec_id = VideoCodecToCodecID(config.codec());
  codec_context->profile = VideoCodecProfileToProfileID(config.profile());
  codec_context->coded_width = config.coded_size().width();
  codec_context->coded_height = config.coded_size().height();
  codec_context->pix_fmt = VideoPixelFormatToAVPixelFormat(config.format());
  if (config.color_space() == COLOR_SPACE_JPEG)
    codec_context->color_range = AVCOL_RANGE_JPEG;

  if (config.extra_data()) {
    codec_context->extradata_size = config.extra_data_size();
    codec_context->extradata = reinterpret_cast<uint8_t*>(
        av_malloc(config.extra_data_size() + FF_INPUT_BUFFER_PADDING_SIZE));
    memcpy(codec_context->extradata, config.extra_data(),
           config.extra_data_size());
    memset(codec_context->extradata + config.extra_data_size(), '\0',
           FF_INPUT_BUFFER_PADDING_SIZE);
  } else {
    codec_context->extradata = NULL;
    codec_context->extradata_size = 0;
  }
}

ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout, int channels) {
  switch (layout) {
    case AV_CH_LAYOUT_MONO:
      return CHANNEL_LAYOUT_MONO;
    case AV_CH_LAYOUT_STEREO:
      return CHANNEL_LAYOUT_STEREO;
    case AV_CH_LAYOUT_2_1:
      return CHANNEL_LAYOUT_2_1;
    case AV_CH_LAYOUT_SURROUND:
      return CHANNEL_LAYOUT_SURROUND;
    case AV_CH_LAYOUT_4POINT0:
      return CHANNEL_LAYOUT_4_0;
    case AV_CH_LAYOUT_2_2:
      return CHANNEL_LAYOUT_2_2;
    case AV_CH_LAYOUT_QUAD:
      return CHANNEL_LAYOUT_QUAD;
    case AV_CH_LAYOUT_5POINT0:
      return CHANNEL_LAYOUT_5_0;
    case AV_CH_LAYOUT_5POINT1:
      return CHANNEL_LAYOUT_5_1;
    case AV_CH_LAYOUT_5POINT0_BACK:
      return CHANNEL_LAYOUT_5_0_BACK;
    case AV_CH_LAYOUT_5POINT1_BACK:
      return CHANNEL_LAYOUT_5_1_BACK;
    case AV_CH_LAYOUT_7POINT0:
      return CHANNEL_LAYOUT_7_0;
    case AV_CH_LAYOUT_7POINT1:
      return CHANNEL_LAYOUT_7_1;
    case AV_CH_LAYOUT_7POINT1_WIDE:
      return CHANNEL_LAYOUT_7_1_WIDE;
    case AV_CH_LAYOUT_STEREO_DOWNMIX:
      return CHANNEL_LAYOUT_STEREO_DOWNMIX;
    case AV_CH_LAYOUT_2POINT1:
      return CHANNEL_LAYOUT_2POINT1;
    case AV_CH_LAYOUT_3POINT1:
      return CHANNEL_LAYOUT_3_1;
    case AV_CH_LAYOUT_4POINT1:
      return CHANNEL_LAYOUT_4_1;
    case AV_CH_LAYOUT_6POINT0:
      return CHANNEL_LAYOUT_6_0;
    case AV_CH_LAYOUT_6POINT0_FRONT:
      return CHANNEL_LAYOUT_6_0_FRONT;
    case AV_CH_LAYOUT_HEXAGONAL:
      return CHANNEL_LAYOUT_HEXAGONAL;
    case AV_CH_LAYOUT_6POINT1:
      return CHANNEL_LAYOUT_6_1;
    case AV_CH_LAYOUT_6POINT1_BACK:
      return CHANNEL_LAYOUT_6_1_BACK;
    case AV_CH_LAYOUT_6POINT1_FRONT:
      return CHANNEL_LAYOUT_6_1_FRONT;
    case AV_CH_LAYOUT_7POINT0_FRONT:
      return CHANNEL_LAYOUT_7_0_FRONT;
#ifdef AV_CH_LAYOUT_7POINT1_WIDE_BACK
    case AV_CH_LAYOUT_7POINT1_WIDE_BACK:
      return CHANNEL_LAYOUT_7_1_WIDE_BACK;
#endif
    case AV_CH_LAYOUT_OCTAGONAL:
      return CHANNEL_LAYOUT_OCTAGONAL;
    default:
      // FFmpeg channel_layout is 0 for .wav and .mp3. Attempt to guess layout
      // based on the channel count.
      return GuessChannelLayout(channels);
  }
}
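
// Illustrative note (not in the original source): the fallback above means a
// plain two-channel MP3 or WAV stream, which FFmpeg reports with a
// channel_layout of 0, is mapped via GuessChannelLayout(2) to
// CHANNEL_LAYOUT_STEREO.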

VideoPixelFormat AVPixelFormatToVideoPixelFormat(AVPixelFormat pixel_format) {
  // The YUVJ alternatives are FFmpeg's (deprecated, but still in use) way to
  // specify a pixel format and full range color combination.
  switch (pixel_format) {
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUVJ422P:
      return PIXEL_FORMAT_YV16;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVJ444P:
      return PIXEL_FORMAT_YV24;
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
      return PIXEL_FORMAT_YV12;
    case AV_PIX_FMT_YUVA420P:
      return PIXEL_FORMAT_YV12A;
    default:
      DVLOG(1) << "Unsupported AVPixelFormat: " << pixel_format;
  }
  return PIXEL_FORMAT_UNKNOWN;
}

AVPixelFormat VideoPixelFormatToAVPixelFormat(VideoPixelFormat video_format) {
  switch (video_format) {
    case PIXEL_FORMAT_YV16:
      return AV_PIX_FMT_YUV422P;
    case PIXEL_FORMAT_YV12:
      return AV_PIX_FMT_YUV420P;
    case PIXEL_FORMAT_YV12A:
      return AV_PIX_FMT_YUVA420P;
    case PIXEL_FORMAT_YV24:
      return AV_PIX_FMT_YUV444P;
    default:
      DVLOG(1) << "Unsupported Format: " << video_format;
  }
  return AV_PIX_FMT_NONE;
}

ColorSpace AVColorSpaceToColorSpace(AVColorSpace color_space,
                                    AVColorRange color_range) {
  if (color_range == AVCOL_RANGE_JPEG)
    return COLOR_SPACE_JPEG;

  switch (color_space) {
    case AVCOL_SPC_UNSPECIFIED:
      break;
    case AVCOL_SPC_BT709:
      return COLOR_SPACE_HD_REC709;
    case AVCOL_SPC_SMPTE170M:
    case AVCOL_SPC_BT470BG:
      return COLOR_SPACE_SD_REC601;
    default:
      DVLOG(1) << "Unknown AVColorSpace: " << color_space;
  }
  return COLOR_SPACE_UNSPECIFIED;
}

bool FFmpegUTCDateToTime(const char* date_utc, base::Time* out) {
  DCHECK(date_utc);
  DCHECK(out);

  std::vector<base::StringPiece> fields = base::SplitStringPiece(
      date_utc, " ", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
  if (fields.size() != 2)
    return false;

  std::vector<base::StringPiece> date_fields = base::SplitStringPiece(
      fields[0], "-", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
  if (date_fields.size() != 3)
    return false;

  // TODO(acolwell): Update this parsing code when FFmpeg returns sub-second
  // information.
  std::vector<base::StringPiece> time_fields = base::SplitStringPiece(
      fields[1], ":", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
  if (time_fields.size() != 3)
    return false;

  base::Time::Exploded exploded;
  exploded.millisecond = 0;
  if (base::StringToInt(date_fields[0], &exploded.year) &&
      base::StringToInt(date_fields[1], &exploded.month) &&
      base::StringToInt(date_fields[2], &exploded.day_of_month) &&
      base::StringToInt(time_fields[0], &exploded.hour) &&
      base::StringToInt(time_fields[1], &exploded.minute) &&
      base::StringToInt(time_fields[2], &exploded.second)) {
    base::Time parsed_time = base::Time::FromUTCExploded(exploded);
    if (parsed_time.is_null())
      return false;

    *out = parsed_time;
    return true;
  }

  return false;
}
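
// Illustrative note (not in the original source): the parser above expects a
// UTC date string of the form "YYYY-MM-DD HH:MM:SS". For example,
// FFmpegUTCDateToTime("2012-11-10 14:02:52", &time) succeeds, while strings
// with a different number of fields or non-numeric fields return false.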

}  // namespace media