// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/ffmpeg/ffmpeg_common.h"

#include "base/basictypes.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "media/base/decoder_buffer.h"
#include "media/base/video_util.h"

namespace media {

// Why FF_INPUT_BUFFER_PADDING_SIZE? FFmpeg assumes all input buffers are
// padded. Check here to ensure FFmpeg only receives data padded to its
// specifications.
static_assert(DecoderBuffer::kPaddingSize >= FF_INPUT_BUFFER_PADDING_SIZE,
              "DecoderBuffer padding size does not fit ffmpeg requirement");

// Alignment requirement by FFmpeg for input and output buffers. This needs to
// be updated to match FFmpeg when it changes.
#if defined(ARCH_CPU_ARM_FAMILY)
static const int kFFmpegBufferAddressAlignment = 16;
#else
static const int kFFmpegBufferAddressAlignment = 32;
#endif

// Check here to ensure FFmpeg only receives data aligned to its specifications.
static_assert(
    DecoderBuffer::kAlignmentSize >= kFFmpegBufferAddressAlignment &&
        DecoderBuffer::kAlignmentSize % kFFmpegBufferAddressAlignment == 0,
    "DecoderBuffer alignment size does not fit ffmpeg requirement");

// Allows faster SIMD YUV convert. Also, FFmpeg over-reads/over-writes
// occasionally. See video_get_buffer() in libavcodec/utils.c.
static const int kFFmpegOutputBufferPaddingSize = 16;

static_assert(VideoFrame::kFrameSizePadding >= kFFmpegOutputBufferPaddingSize,
              "VideoFrame padding size does not fit ffmpeg requirement");

static_assert(
    VideoFrame::kFrameAddressAlignment >= kFFmpegBufferAddressAlignment &&
        VideoFrame::kFrameAddressAlignment % kFFmpegBufferAddressAlignment == 0,
    "VideoFrame frame address alignment does not fit ffmpeg requirement");

static const AVRational kMicrosBase = { 1, base::Time::kMicrosecondsPerSecond };

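// The two helpers below rescale between FFmpeg time-base units and
// microseconds via av_rescale_q(); e.g. a timestamp of 90000 in a {1, 90000}
// time base corresponds to 1,000,000 microseconds.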
base::TimeDelta ConvertFromTimeBase(const AVRational& time_base,
                                    int64 timestamp) {
  int64 microseconds = av_rescale_q(timestamp, time_base, kMicrosBase);
  return base::TimeDelta::FromMicroseconds(microseconds);
}

int64 ConvertToTimeBase(const AVRational& time_base,
                        const base::TimeDelta& timestamp) {
  return av_rescale_q(timestamp.InMicroseconds(), kMicrosBase, time_base);
}

// Converts an FFmpeg audio codec ID into its corresponding supported codec id.
static AudioCodec CodecIDToAudioCodec(AVCodecID codec_id) {
  switch (codec_id) {
    case AV_CODEC_ID_AAC:
      return kCodecAAC;
    case AV_CODEC_ID_MP3:
      return kCodecMP3;
    case AV_CODEC_ID_VORBIS:
      return kCodecVorbis;
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_PCM_F32LE:
      return kCodecPCM;
    case AV_CODEC_ID_PCM_S16BE:
      return kCodecPCM_S16BE;
    case AV_CODEC_ID_PCM_S24BE:
      return kCodecPCM_S24BE;
    case AV_CODEC_ID_FLAC:
      return kCodecFLAC;
    case AV_CODEC_ID_AMR_NB:
      return kCodecAMR_NB;
    case AV_CODEC_ID_AMR_WB:
      return kCodecAMR_WB;
    case AV_CODEC_ID_GSM_MS:
      return kCodecGSM_MS;
    case AV_CODEC_ID_PCM_ALAW:
      return kCodecPCM_ALAW;
    case AV_CODEC_ID_PCM_MULAW:
      return kCodecPCM_MULAW;
    case AV_CODEC_ID_OPUS:
      return kCodecOpus;
    case AV_CODEC_ID_ALAC:
      return kCodecALAC;
    default:
      DVLOG(1) << "Unknown audio CodecID: " << codec_id;
  }
  return kUnknownAudioCodec;
}

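// Converts a supported audio codec id back to its FFmpeg codec ID. For PCM,
// |sample_format| selects among the little-endian PCM variants.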
static AVCodecID AudioCodecToCodecID(AudioCodec audio_codec,
                                     SampleFormat sample_format) {
  switch (audio_codec) {
    case kCodecAAC:
      return AV_CODEC_ID_AAC;
    case kCodecALAC:
      return AV_CODEC_ID_ALAC;
    case kCodecMP3:
      return AV_CODEC_ID_MP3;
    case kCodecPCM:
      switch (sample_format) {
        case kSampleFormatU8:
          return AV_CODEC_ID_PCM_U8;
        case kSampleFormatS16:
          return AV_CODEC_ID_PCM_S16LE;
        case kSampleFormatS32:
          return AV_CODEC_ID_PCM_S24LE;
        case kSampleFormatF32:
          return AV_CODEC_ID_PCM_F32LE;
        default:
          DVLOG(1) << "Unsupported sample format: " << sample_format;
      }
      break;
    case kCodecPCM_S16BE:
      return AV_CODEC_ID_PCM_S16BE;
    case kCodecPCM_S24BE:
      return AV_CODEC_ID_PCM_S24BE;
    case kCodecVorbis:
      return AV_CODEC_ID_VORBIS;
    case kCodecFLAC:
      return AV_CODEC_ID_FLAC;
    case kCodecAMR_NB:
      return AV_CODEC_ID_AMR_NB;
    case kCodecAMR_WB:
      return AV_CODEC_ID_AMR_WB;
    case kCodecGSM_MS:
      return AV_CODEC_ID_GSM_MS;
    case kCodecPCM_ALAW:
      return AV_CODEC_ID_PCM_ALAW;
    case kCodecPCM_MULAW:
      return AV_CODEC_ID_PCM_MULAW;
    case kCodecOpus:
      return AV_CODEC_ID_OPUS;
    default:
      DVLOG(1) << "Unknown AudioCodec: " << audio_codec;
  }
  return AV_CODEC_ID_NONE;
}

// Converts an FFmpeg video codec ID into its corresponding supported codec id.
static VideoCodec CodecIDToVideoCodec(AVCodecID codec_id) {
  switch (codec_id) {
    case AV_CODEC_ID_H264:
      return kCodecH264;
    case AV_CODEC_ID_THEORA:
      return kCodecTheora;
    case AV_CODEC_ID_MPEG4:
      return kCodecMPEG4;
    case AV_CODEC_ID_VP8:
      return kCodecVP8;
    case AV_CODEC_ID_VP9:
      return kCodecVP9;
    default:
      DVLOG(1) << "Unknown video CodecID: " << codec_id;
  }
  return kUnknownVideoCodec;
}

static AVCodecID VideoCodecToCodecID(VideoCodec video_codec) {
  switch (video_codec) {
    case kCodecH264:
      return AV_CODEC_ID_H264;
    case kCodecTheora:
      return AV_CODEC_ID_THEORA;
    case kCodecMPEG4:
      return AV_CODEC_ID_MPEG4;
    case kCodecVP8:
      return AV_CODEC_ID_VP8;
    case kCodecVP9:
      return AV_CODEC_ID_VP9;
    default:
      DVLOG(1) << "Unknown VideoCodec: " << video_codec;
  }
  return AV_CODEC_ID_NONE;
}

static VideoCodecProfile ProfileIDToVideoCodecProfile(int profile) {
  // Clear out the CONSTRAINED & INTRA flags which are strict subsets of the
  // corresponding profiles with which they're used.
  profile &= ~FF_PROFILE_H264_CONSTRAINED;
  profile &= ~FF_PROFILE_H264_INTRA;
  switch (profile) {
    case FF_PROFILE_H264_BASELINE:
      return H264PROFILE_BASELINE;
    case FF_PROFILE_H264_MAIN:
      return H264PROFILE_MAIN;
    case FF_PROFILE_H264_EXTENDED:
      return H264PROFILE_EXTENDED;
    case FF_PROFILE_H264_HIGH:
      return H264PROFILE_HIGH;
    case FF_PROFILE_H264_HIGH_10:
      return H264PROFILE_HIGH10PROFILE;
    case FF_PROFILE_H264_HIGH_422:
      return H264PROFILE_HIGH422PROFILE;
    case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
      return H264PROFILE_HIGH444PREDICTIVEPROFILE;
    default:
      DVLOG(1) << "Unknown profile id: " << profile;
  }
  return VIDEO_CODEC_PROFILE_UNKNOWN;
}

static int VideoCodecProfileToProfileID(VideoCodecProfile profile) {
  switch (profile) {
    case H264PROFILE_BASELINE:
      return FF_PROFILE_H264_BASELINE;
    case H264PROFILE_MAIN:
      return FF_PROFILE_H264_MAIN;
    case H264PROFILE_EXTENDED:
      return FF_PROFILE_H264_EXTENDED;
    case H264PROFILE_HIGH:
      return FF_PROFILE_H264_HIGH;
    case H264PROFILE_HIGH10PROFILE:
      return FF_PROFILE_H264_HIGH_10;
    case H264PROFILE_HIGH422PROFILE:
      return FF_PROFILE_H264_HIGH_422;
    case H264PROFILE_HIGH444PREDICTIVEPROFILE:
      return FF_PROFILE_H264_HIGH_444_PREDICTIVE;
    default:
      DVLOG(1) << "Unknown VideoCodecProfile: " << profile;
  }
  return FF_PROFILE_UNKNOWN;
}

SampleFormat AVSampleFormatToSampleFormat(AVSampleFormat sample_format) {
  switch (sample_format) {
    case AV_SAMPLE_FMT_U8:
      return kSampleFormatU8;
    case AV_SAMPLE_FMT_S16:
      return kSampleFormatS16;
    case AV_SAMPLE_FMT_S32:
      return kSampleFormatS32;
    case AV_SAMPLE_FMT_FLT:
      return kSampleFormatF32;
    case AV_SAMPLE_FMT_S16P:
      return kSampleFormatPlanarS16;
    case AV_SAMPLE_FMT_S32P:
      return kSampleFormatPlanarS32;
    case AV_SAMPLE_FMT_FLTP:
      return kSampleFormatPlanarF32;
    default:
      DVLOG(1) << "Unknown AVSampleFormat: " << sample_format;
  }
  return kUnknownSampleFormat;
}

static AVSampleFormat SampleFormatToAVSampleFormat(SampleFormat sample_format) {
  switch (sample_format) {
    case kSampleFormatU8:
      return AV_SAMPLE_FMT_U8;
    case kSampleFormatS16:
      return AV_SAMPLE_FMT_S16;
    case kSampleFormatS32:
      return AV_SAMPLE_FMT_S32;
    case kSampleFormatF32:
      return AV_SAMPLE_FMT_FLT;
    case kSampleFormatPlanarS16:
      return AV_SAMPLE_FMT_S16P;
    case kSampleFormatPlanarF32:
      return AV_SAMPLE_FMT_FLTP;
    default:
      DVLOG(1) << "Unknown SampleFormat: " << sample_format;
  }
  return AV_SAMPLE_FMT_NONE;
}

void AVCodecContextToAudioDecoderConfig(
    const AVCodecContext* codec_context,
    bool is_encrypted,
    AudioDecoderConfig* config,
    bool record_stats) {
  DCHECK_EQ(codec_context->codec_type, AVMEDIA_TYPE_AUDIO);

  AudioCodec codec = CodecIDToAudioCodec(codec_context->codec_id);

  SampleFormat sample_format =
      AVSampleFormatToSampleFormat(codec_context->sample_fmt);

  ChannelLayout channel_layout = ChannelLayoutToChromeChannelLayout(
      codec_context->channel_layout, codec_context->channels);

  int sample_rate = codec_context->sample_rate;
  if (codec == kCodecOpus) {
    // |codec_context->sample_fmt| is not set by FFmpeg because Opus decoding
    // is not enabled in FFmpeg. It doesn't matter what value is set here, so
    // long as it's valid; the true sample format is selected inside the
    // decoder.
    sample_format = kSampleFormatF32;

    // Always use 48kHz for OPUS. Technically we should match to the highest
    // supported hardware sample rate among [8, 12, 16, 24, 48] kHz, but we
    // don't know the hardware sample rate at this point and those rates are
    // rarely used for output. See the "Input Sample Rate" section of the spec:
    // http://tools.ietf.org/html/draft-terriberry-oggopus-01#page-11
    sample_rate = 48000;
  }

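  // FFmpeg reports |seek_preroll| as a number of samples, so convert it to a
  // duration using the stream's sample rate.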
  base::TimeDelta seek_preroll;
  if (codec_context->seek_preroll > 0) {
    seek_preroll = base::TimeDelta::FromMicroseconds(
        codec_context->seek_preroll * 1000000.0 / codec_context->sample_rate);
  }

  config->Initialize(codec,
                     sample_format,
                     channel_layout,
                     sample_rate,
                     codec_context->extradata,
                     codec_context->extradata_size,
                     is_encrypted,
                     record_stats,
                     seek_preroll,
                     codec_context->delay);
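  // Verify that the bit depth implied by FFmpeg's sample format matches the
  // config. Opus is skipped because |sample_format| was hard-coded above and
  // |codec_context->sample_fmt| is not set for it.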
  if (codec != kCodecOpus) {
    DCHECK_EQ(av_get_bytes_per_sample(codec_context->sample_fmt) * 8,
              config->bits_per_channel());
  }
}

void AVStreamToAudioDecoderConfig(
    const AVStream* stream,
    AudioDecoderConfig* config,
    bool record_stats) {
  bool is_encrypted = false;
  AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL, 0);
  if (key)
    is_encrypted = true;
  return AVCodecContextToAudioDecoderConfig(
      stream->codec, is_encrypted, config, record_stats);
}

void AudioDecoderConfigToAVCodecContext(const AudioDecoderConfig& config,
                                        AVCodecContext* codec_context) {
  codec_context->codec_type = AVMEDIA_TYPE_AUDIO;
  codec_context->codec_id = AudioCodecToCodecID(config.codec(),
                                                config.sample_format());
  codec_context->sample_fmt = SampleFormatToAVSampleFormat(
      config.sample_format());

  // TODO(scherkus): should we set |channel_layout|? I'm not sure if FFmpeg uses
  // said information to decode.
  codec_context->channels =
      ChannelLayoutToChannelCount(config.channel_layout());
  codec_context->sample_rate = config.samples_per_second();

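  // FFmpeg requires |extradata| to be padded with FF_INPUT_BUFFER_PADDING_SIZE
  // zeroed bytes so its bitstream readers can safely over-read.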
  if (config.extra_data()) {
    codec_context->extradata_size = config.extra_data_size();
    codec_context->extradata = reinterpret_cast<uint8_t*>(
        av_malloc(config.extra_data_size() + FF_INPUT_BUFFER_PADDING_SIZE));
    memcpy(codec_context->extradata, config.extra_data(),
           config.extra_data_size());
    memset(codec_context->extradata + config.extra_data_size(), '\0',
           FF_INPUT_BUFFER_PADDING_SIZE);
  } else {
    codec_context->extradata = NULL;
    codec_context->extradata_size = 0;
  }
}

void AVStreamToVideoDecoderConfig(
    const AVStream* stream,
    VideoDecoderConfig* config,
    bool record_stats) {
  gfx::Size coded_size(stream->codec->coded_width, stream->codec->coded_height);

  // TODO(vrk): This assumes decoded frame data starts at (0, 0), which is true
  // for now, but may not always be true forever. Fix this in the future.
  gfx::Rect visible_rect(stream->codec->width, stream->codec->height);

  AVRational aspect_ratio = { 1, 1 };
  if (stream->sample_aspect_ratio.num)
    aspect_ratio = stream->sample_aspect_ratio;
  else if (stream->codec->sample_aspect_ratio.num)
    aspect_ratio = stream->codec->sample_aspect_ratio;

  VideoCodec codec = CodecIDToVideoCodec(stream->codec->codec_id);

  VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
  if (codec == kCodecVP8)
    profile = VP8PROFILE_ANY;
  else if (codec == kCodecVP9)
    profile = VP9PROFILE_ANY;
  else
    profile = ProfileIDToVideoCodecProfile(stream->codec->profile);

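  // The natural size is the visible size with the pixel aspect ratio applied;
  // e.g. a 704x480 visible rect with a 10:11 sample aspect ratio gives a
  // 640x480 natural size.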
  gfx::Size natural_size = GetNaturalSize(
      visible_rect.size(), aspect_ratio.num, aspect_ratio.den);

  if (record_stats) {
    // Note the PRESUBMIT_IGNORE_UMA_MAX below; it silences the PRESUBMIT.py
    // check for uma enum max usage, since we're abusing
    // UMA_HISTOGRAM_ENUMERATION to report a discrete value.
    UMA_HISTOGRAM_ENUMERATION("Media.VideoColorRange",
                              stream->codec->color_range,
                              AVCOL_RANGE_NB);  // PRESUBMIT_IGNORE_UMA_MAX
  }

  VideoFrame::Format format = PixelFormatToVideoFormat(stream->codec->pix_fmt);
  if (codec == kCodecVP9) {
    // TODO(tomfinegan): libavcodec doesn't know about VP9.
    format = VideoFrame::YV12;
    coded_size = visible_rect.size();
  }

  // Pad out |coded_size| for subsampled YUV formats.
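  // e.g. a 1917x1079 YV12 coded size is rounded up to 1918x1080 below.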
  if (format != VideoFrame::YV24) {
    coded_size.set_width((coded_size.width() + 1) / 2 * 2);
    if (format != VideoFrame::YV16)
      coded_size.set_height((coded_size.height() + 1) / 2 * 2);
  }

  bool is_encrypted = false;
  AVDictionaryEntry* key = av_dict_get(stream->metadata, "enc_key_id", NULL, 0);
  if (key)
    is_encrypted = true;

  AVDictionaryEntry* webm_alpha =
      av_dict_get(stream->metadata, "alpha_mode", NULL, 0);
  if (webm_alpha && !strcmp(webm_alpha->value, "1")) {
    format = VideoFrame::YV12A;
  }

  config->Initialize(codec,
                     profile,
                     format,
                     (stream->codec->colorspace == AVCOL_SPC_BT709)
                         ? VideoFrame::COLOR_SPACE_HD_REC709
                         : VideoFrame::COLOR_SPACE_UNSPECIFIED,
                     coded_size, visible_rect, natural_size,
                     stream->codec->extradata, stream->codec->extradata_size,
                     is_encrypted,
                     record_stats);
}

void VideoDecoderConfigToAVCodecContext(
    const VideoDecoderConfig& config,
    AVCodecContext* codec_context) {
  codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
  codec_context->codec_id = VideoCodecToCodecID(config.codec());
  codec_context->profile = VideoCodecProfileToProfileID(config.profile());
  codec_context->coded_width = config.coded_size().width();
  codec_context->coded_height = config.coded_size().height();
  codec_context->pix_fmt = VideoFormatToPixelFormat(config.format());

  if (config.extra_data()) {
    codec_context->extradata_size = config.extra_data_size();
    codec_context->extradata = reinterpret_cast<uint8_t*>(
        av_malloc(config.extra_data_size() + FF_INPUT_BUFFER_PADDING_SIZE));
    memcpy(codec_context->extradata, config.extra_data(),
           config.extra_data_size());
    memset(codec_context->extradata + config.extra_data_size(), '\0',
           FF_INPUT_BUFFER_PADDING_SIZE);
  } else {
    codec_context->extradata = NULL;
    codec_context->extradata_size = 0;
  }
}

ChannelLayout ChannelLayoutToChromeChannelLayout(int64_t layout, int channels) {
  switch (layout) {
    case AV_CH_LAYOUT_MONO:
      return CHANNEL_LAYOUT_MONO;
    case AV_CH_LAYOUT_STEREO:
      return CHANNEL_LAYOUT_STEREO;
    case AV_CH_LAYOUT_2_1:
      return CHANNEL_LAYOUT_2_1;
    case AV_CH_LAYOUT_SURROUND:
      return CHANNEL_LAYOUT_SURROUND;
    case AV_CH_LAYOUT_4POINT0:
      return CHANNEL_LAYOUT_4_0;
    case AV_CH_LAYOUT_2_2:
      return CHANNEL_LAYOUT_2_2;
    case AV_CH_LAYOUT_QUAD:
      return CHANNEL_LAYOUT_QUAD;
    case AV_CH_LAYOUT_5POINT0:
      return CHANNEL_LAYOUT_5_0;
    case AV_CH_LAYOUT_5POINT1:
      return CHANNEL_LAYOUT_5_1;
    case AV_CH_LAYOUT_5POINT0_BACK:
      return CHANNEL_LAYOUT_5_0_BACK;
    case AV_CH_LAYOUT_5POINT1_BACK:
      return CHANNEL_LAYOUT_5_1_BACK;
    case AV_CH_LAYOUT_7POINT0:
      return CHANNEL_LAYOUT_7_0;
    case AV_CH_LAYOUT_7POINT1:
      return CHANNEL_LAYOUT_7_1;
    case AV_CH_LAYOUT_7POINT1_WIDE:
      return CHANNEL_LAYOUT_7_1_WIDE;
    case AV_CH_LAYOUT_STEREO_DOWNMIX:
      return CHANNEL_LAYOUT_STEREO_DOWNMIX;
    case AV_CH_LAYOUT_2POINT1:
      return CHANNEL_LAYOUT_2POINT1;
    case AV_CH_LAYOUT_3POINT1:
      return CHANNEL_LAYOUT_3_1;
    case AV_CH_LAYOUT_4POINT1:
      return CHANNEL_LAYOUT_4_1;
    case AV_CH_LAYOUT_6POINT0:
      return CHANNEL_LAYOUT_6_0;
    case AV_CH_LAYOUT_6POINT0_FRONT:
      return CHANNEL_LAYOUT_6_0_FRONT;
    case AV_CH_LAYOUT_HEXAGONAL:
      return CHANNEL_LAYOUT_HEXAGONAL;
    case AV_CH_LAYOUT_6POINT1:
      return CHANNEL_LAYOUT_6_1;
    case AV_CH_LAYOUT_6POINT1_BACK:
      return CHANNEL_LAYOUT_6_1_BACK;
    case AV_CH_LAYOUT_6POINT1_FRONT:
      return CHANNEL_LAYOUT_6_1_FRONT;
    case AV_CH_LAYOUT_7POINT0_FRONT:
      return CHANNEL_LAYOUT_7_0_FRONT;
#ifdef AV_CH_LAYOUT_7POINT1_WIDE_BACK
    case AV_CH_LAYOUT_7POINT1_WIDE_BACK:
      return CHANNEL_LAYOUT_7_1_WIDE_BACK;
#endif
    case AV_CH_LAYOUT_OCTAGONAL:
      return CHANNEL_LAYOUT_OCTAGONAL;
    default:
      // FFmpeg channel_layout is 0 for .wav and .mp3. Attempt to guess layout
      // based on the channel count.
      return GuessChannelLayout(channels);
  }
}

VideoFrame::Format PixelFormatToVideoFormat(PixelFormat pixel_format) {
  switch (pixel_format) {
    case PIX_FMT_YUV422P:
      return VideoFrame::YV16;
    case PIX_FMT_YUV444P:
      return VideoFrame::YV24;
    case PIX_FMT_YUV420P:
      return VideoFrame::YV12;
    case PIX_FMT_YUVA420P:
      return VideoFrame::YV12A;
    default:
      DVLOG(1) << "Unsupported PixelFormat: " << pixel_format;
  }
  return VideoFrame::UNKNOWN;
}

PixelFormat VideoFormatToPixelFormat(VideoFrame::Format video_format) {
  switch (video_format) {
    case VideoFrame::YV16:
      return PIX_FMT_YUV422P;
    case VideoFrame::YV12:
      return PIX_FMT_YUV420P;
    case VideoFrame::YV12A:
      return PIX_FMT_YUVA420P;
    case VideoFrame::YV24:
      return PIX_FMT_YUV444P;
    default:
      DVLOG(1) << "Unsupported VideoFrame::Format: " << video_format;
  }
  return PIX_FMT_NONE;
}

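// Parses an FFmpeg UTC date string of the form "YYYY-MM-DD HH:MM:SS"
// (e.g. "2012-11-10 12:34:56") into |out|; returns false on malformed input.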
bool FFmpegUTCDateToTime(const char* date_utc,
                         base::Time* out) {
  DCHECK(date_utc);
  DCHECK(out);

  std::vector<std::string> fields;
  std::vector<std::string> date_fields;
  std::vector<std::string> time_fields;
  base::Time::Exploded exploded;
  exploded.millisecond = 0;

  // TODO(acolwell): Update this parsing code when FFmpeg returns sub-second
  // information.
  if ((Tokenize(date_utc, " ", &fields) == 2) &&
      (Tokenize(fields[0], "-", &date_fields) == 3) &&
      (Tokenize(fields[1], ":", &time_fields) == 3) &&
      base::StringToInt(date_fields[0], &exploded.year) &&
      base::StringToInt(date_fields[1], &exploded.month) &&
      base::StringToInt(date_fields[2], &exploded.day_of_month) &&
      base::StringToInt(time_fields[0], &exploded.hour) &&
      base::StringToInt(time_fields[1], &exploded.minute) &&
      base::StringToInt(time_fields[2], &exploded.second)) {
    base::Time parsed_time = base::Time::FromUTCExploded(exploded);
    if (parsed_time.is_null())
      return false;

    *out = parsed_time;
    return true;
  }

  return false;
}

}  // namespace media