Cleanup: Update the path to gfx size headers.
media/cast/test/fake_media_source.cc (blob 2742c05547c5fce34f53fa5ecb987a9c9980cd9e)
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/cast/test/fake_media_source.h"

#include "base/files/memory_mapped_file.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_buffer.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_fifo.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/media.h"
#include "media/base/multi_channel_resampler.h"
#include "media/base/video_frame.h"
#include "media/base/video_util.h"
#include "media/cast/cast_sender.h"
#include "media/cast/test/utility/audio_utility.h"
#include "media/cast/test/utility/video_utility.h"
#include "media/ffmpeg/ffmpeg_common.h"
#include "media/ffmpeg/ffmpeg_deleters.h"
#include "media/filters/audio_renderer_algorithm.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_glue.h"
#include "media/filters/in_memory_url_protocol.h"
#include "ui/gfx/geometry/size.h"

namespace {

static const int kAudioChannels = 2;
static const int kAudioSamplingFrequency = 48000;
static const int kSoundFrequency = 1234;  // Frequency of sinusoid wave.
static const float kSoundVolume = 0.5f;
static const int kAudioFrameMs = 10;  // Each audio frame is exactly 10ms.
static const int kAudioPacketsPerSecond = 1000 / kAudioFrameMs;

void AVFreeFrame(AVFrame* frame) {
  av_frame_free(&frame);
}

base::TimeDelta PtsToTimeDelta(int64 pts, const AVRational& time_base) {
  return pts * base::TimeDelta::FromSeconds(1) * time_base.num / time_base.den;
}

int64 TimeDeltaToPts(base::TimeDelta delta, const AVRational& time_base) {
  return static_cast<int64>(
      delta.InSecondsF() * time_base.den / time_base.num +
      0.5 /* rounding */);
}

}  // namespace

namespace media {
namespace cast {

FakeMediaSource::FakeMediaSource(
    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
    base::TickClock* clock,
    const VideoSenderConfig& video_config,
    bool keep_frames)
    : task_runner_(task_runner),
      video_config_(video_config),
      keep_frames_(keep_frames),
      synthetic_count_(0),
      clock_(clock),
      audio_frame_count_(0),
      video_frame_count_(0),
      av_format_context_(NULL),
      audio_stream_index_(-1),
      playback_rate_(1.0),
      video_stream_index_(-1),
      video_frame_rate_numerator_(video_config.max_frame_rate),
      video_frame_rate_denominator_(1),
      video_first_pts_(0),
      video_first_pts_set_(false),
      weak_factory_(this) {
  audio_bus_factory_.reset(new TestAudioBusFactory(kAudioChannels,
                                                   kAudioSamplingFrequency,
                                                   kSoundFrequency,
                                                   kSoundVolume));
}

FakeMediaSource::~FakeMediaSource() {
}

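// Memory-maps |video_file| and prepares FFmpeg demuxing and decoding state
// for its audio and video streams. On failure it logs and returns, leaving
// the stream indices unset so Start() falls back to synthesized content.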
void FakeMediaSource::SetSourceFile(const base::FilePath& video_file,
                                    int override_fps) {
  DCHECK(!video_file.empty());

  LOG(INFO) << "Source: " << video_file.value();
  if (!file_data_.Initialize(video_file)) {
    LOG(ERROR) << "Cannot load file.";
    return;
  }
  protocol_.reset(
      new InMemoryUrlProtocol(file_data_.data(), file_data_.length(), false));
  glue_.reset(new FFmpegGlue(protocol_.get()));

  if (!glue_->OpenContext()) {
    LOG(ERROR) << "Cannot open file.";
    return;
  }

  // AVFormatContext is owned by the glue.
  av_format_context_ = glue_->format_context();
  if (avformat_find_stream_info(av_format_context_, NULL) < 0) {
    LOG(ERROR) << "Cannot find stream information.";
    return;
  }

  // Prepare FFmpeg decoders.
  for (unsigned int i = 0; i < av_format_context_->nb_streams; ++i) {
    AVStream* av_stream = av_format_context_->streams[i];
    AVCodecContext* av_codec_context = av_stream->codec;
    AVCodec* av_codec = avcodec_find_decoder(av_codec_context->codec_id);

    if (!av_codec) {
      LOG(ERROR) << "Cannot find decoder for the codec: "
                 << av_codec_context->codec_id;
      continue;
    }

    // Number of threads for decoding.
    av_codec_context->thread_count = 2;
    av_codec_context->error_concealment = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
    av_codec_context->request_sample_fmt = AV_SAMPLE_FMT_S16;

    if (avcodec_open2(av_codec_context, av_codec, NULL) < 0) {
      LOG(ERROR) << "Cannot open AVCodecContext for the codec: "
                 << av_codec_context->codec_id;
      return;
    }

    if (av_codec->type == AVMEDIA_TYPE_AUDIO) {
      if (av_codec_context->sample_fmt == AV_SAMPLE_FMT_S16P) {
        LOG(ERROR) << "Audio format not supported.";
        continue;
      }
      ChannelLayout layout = ChannelLayoutToChromeChannelLayout(
          av_codec_context->channel_layout,
          av_codec_context->channels);
      if (layout == CHANNEL_LAYOUT_UNSUPPORTED) {
        LOG(ERROR) << "Unsupported audio channels layout.";
        continue;
      }
      if (audio_stream_index_ != -1) {
        LOG(WARNING) << "Found multiple audio streams.";
      }
      audio_stream_index_ = static_cast<int>(i);
      audio_params_.Reset(
          AudioParameters::AUDIO_PCM_LINEAR,
          layout,
          av_codec_context->channels,
          av_codec_context->sample_rate,
          8 * av_get_bytes_per_sample(av_codec_context->sample_fmt),
          av_codec_context->sample_rate / kAudioPacketsPerSecond);
      LOG(INFO) << "Source file has audio.";
    } else if (av_codec->type == AVMEDIA_TYPE_VIDEO) {
      VideoFrame::Format format =
          PixelFormatToVideoFormat(av_codec_context->pix_fmt);
      if (format != VideoFrame::YV12) {
        LOG(ERROR) << "Cannot handle non YV12 video format: " << format;
        continue;
      }
      if (video_stream_index_ != -1) {
        LOG(WARNING) << "Found multiple video streams.";
      }
      video_stream_index_ = static_cast<int>(i);
      if (override_fps > 0) {
        // If video is played at a manual speed audio needs to match.
        playback_rate_ = 1.0 * override_fps *
            av_stream->r_frame_rate.den / av_stream->r_frame_rate.num;
        video_frame_rate_numerator_ = override_fps;
        video_frame_rate_denominator_ = 1;
      } else {
        playback_rate_ = 1.0;
        video_frame_rate_numerator_ = av_stream->r_frame_rate.num;
        video_frame_rate_denominator_ = av_stream->r_frame_rate.den;
      }
      LOG(INFO) << "Source file has video.";
    } else {
      LOG(ERROR) << "Unknown stream type; ignore.";
    }
  }

  Rewind();
}

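// Begins delivering frames to the provided sender inputs. If no source file
// was successfully opened, synthetic test-pattern video and a sine-wave tone
// are sent instead of transcoded media.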
void FakeMediaSource::Start(scoped_refptr<AudioFrameInput> audio_frame_input,
                            scoped_refptr<VideoFrameInput> video_frame_input) {
  audio_frame_input_ = audio_frame_input;
  video_frame_input_ = video_frame_input;

  LOG(INFO) << "Max Frame rate: " << video_config_.max_frame_rate;
  LOG(INFO) << "Source Frame rate: "
            << video_frame_rate_numerator_ << "/"
            << video_frame_rate_denominator_ << " fps.";
  LOG(INFO) << "Audio playback rate: " << playback_rate_;

  if (start_time_.is_null())
    start_time_ = clock_->NowTicks();

  if (!is_transcoding_audio() && !is_transcoding_video()) {
    // Send fake patterns.
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(
            &FakeMediaSource::SendNextFakeFrame,
            base::Unretained(this)));
    return;
  }

  // Send transcoding streams.
  audio_algo_.Initialize(audio_params_);
  audio_algo_.FlushBuffers();
  audio_fifo_input_bus_ =
      AudioBus::Create(
          audio_params_.channels(), audio_params_.frames_per_buffer());
  // The audio FIFO can carry all the data from the AudioRendererAlgorithm.
  audio_fifo_.reset(
      new AudioFifo(audio_params_.channels(),
                    audio_algo_.QueueCapacity()));
  audio_resampler_.reset(new media::MultiChannelResampler(
      audio_params_.channels(),
      static_cast<double>(audio_params_.sample_rate()) /
          kAudioSamplingFrequency,
      audio_params_.frames_per_buffer(),
      base::Bind(&FakeMediaSource::ProvideData, base::Unretained(this))));
  task_runner_->PostTask(
      FROM_HERE,
      base::Bind(
          &FakeMediaSource::SendNextFrame,
          base::Unretained(this)));
}

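// Sends one synthesized test-pattern video frame plus enough audio (decoded
// from the file when available, otherwise the generated tone) to cover the
// interval up to the next video frame, then reschedules itself against the
// wall clock, skipping frames if generation falls behind.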
void FakeMediaSource::SendNextFakeFrame() {
  gfx::Size size(video_config_.width, video_config_.height);
  scoped_refptr<VideoFrame> video_frame =
      VideoFrame::CreateBlackFrame(size);
  PopulateVideoFrame(video_frame.get(), synthetic_count_);
  ++synthetic_count_;

  const base::TimeTicks now = clock_->NowTicks();

  base::TimeDelta video_time = VideoFrameTime(++video_frame_count_);
  video_frame->set_timestamp(video_time);
  if (keep_frames_)
    inserted_video_frame_queue_.push(video_frame);
  video_frame_input_->InsertRawVideoFrame(video_frame,
                                          start_time_ + video_time);

  // Send just enough audio data to match next video frame's time.
  base::TimeDelta audio_time = AudioFrameTime(audio_frame_count_);
  while (audio_time < video_time) {
    if (is_transcoding_audio()) {
      Decode(true);
      CHECK(!audio_bus_queue_.empty()) << "No audio decoded.";
      scoped_ptr<AudioBus> bus(audio_bus_queue_.front());
      audio_bus_queue_.pop();
      audio_frame_input_->InsertAudio(
          bus.Pass(), start_time_ + audio_time);
    } else {
      audio_frame_input_->InsertAudio(
          audio_bus_factory_->NextAudioBus(
              base::TimeDelta::FromMilliseconds(kAudioFrameMs)),
          start_time_ + audio_time);
    }
    audio_time = AudioFrameTime(++audio_frame_count_);
  }

  // This is the time since FakeMediaSource was started.
  const base::TimeDelta elapsed_time = now - start_time_;

  // Handle the case when frame generation cannot keep up.
  // Move the time ahead to match the next frame.
  while (video_time < elapsed_time) {
    LOG(WARNING) << "Skipping one frame.";
    video_time = VideoFrameTime(++video_frame_count_);
  }

  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&FakeMediaSource::SendNextFakeFrame,
                 weak_factory_.GetWeakPtr()),
      video_time - elapsed_time);
}

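// Copies the next decoded video frame's Y/U/V planes into a freshly
// allocated frame and hands it to the video sender, provided the decoded
// timestamp is not ahead of |elapsed_time|. Returns true if a frame was sent.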
bool FakeMediaSource::SendNextTranscodedVideo(base::TimeDelta elapsed_time) {
  if (!is_transcoding_video())
    return false;

  Decode(false);
  if (video_frame_queue_.empty())
    return false;

  scoped_refptr<VideoFrame> decoded_frame =
      video_frame_queue_.front();
  if (elapsed_time < decoded_frame->timestamp())
    return false;

  gfx::Size size(video_config_.width, video_config_.height);
  scoped_refptr<VideoFrame> video_frame =
      VideoFrame::CreateBlackFrame(size);
  video_frame_queue_.pop();
  media::CopyPlane(VideoFrame::kYPlane,
                   decoded_frame->data(VideoFrame::kYPlane),
                   decoded_frame->stride(VideoFrame::kYPlane),
                   decoded_frame->rows(VideoFrame::kYPlane),
                   video_frame.get());
  media::CopyPlane(VideoFrame::kUPlane,
                   decoded_frame->data(VideoFrame::kUPlane),
                   decoded_frame->stride(VideoFrame::kUPlane),
                   decoded_frame->rows(VideoFrame::kUPlane),
                   video_frame.get());
  media::CopyPlane(VideoFrame::kVPlane,
                   decoded_frame->data(VideoFrame::kVPlane),
                   decoded_frame->stride(VideoFrame::kVPlane),
                   decoded_frame->rows(VideoFrame::kVPlane),
                   video_frame.get());

  // Use the timestamp from the file if we're transcoding.
  video_frame->set_timestamp(ScaleTimestamp(decoded_frame->timestamp()));
  if (keep_frames_)
    inserted_video_frame_queue_.push(video_frame);
  video_frame_input_->InsertRawVideoFrame(
      video_frame, start_time_ + video_frame->timestamp());

  // Make sure queue is not empty.
  Decode(false);
  return true;
}

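// Sends the next resampled audio bus once the audio send timestamp has been
// reached, advancing |audio_sent_ts_| by the number of frames delivered.
// Returns true if a bus was sent.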
bool FakeMediaSource::SendNextTranscodedAudio(base::TimeDelta elapsed_time) {
  if (!is_transcoding_audio())
    return false;

  Decode(true);
  if (audio_bus_queue_.empty())
    return false;

  base::TimeDelta audio_time = audio_sent_ts_->GetTimestamp();
  if (elapsed_time < audio_time)
    return false;
  scoped_ptr<AudioBus> bus(audio_bus_queue_.front());
  audio_bus_queue_.pop();
  audio_sent_ts_->AddFrames(bus->frames());
  audio_frame_input_->InsertAudio(
      bus.Pass(), start_time_ + audio_time);

  // Make sure queue is not empty.
  Decode(true);
  return true;
}

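// Main loop for the transcoding path: pushes as much audio as wall-clock time
// allows, keeps video in sync with the audio already sent, rewinds at end of
// stream, and reschedules itself every audio frame interval.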
void FakeMediaSource::SendNextFrame() {
  // Send as much as possible. Audio is sent according to
  // system time.
  while (SendNextTranscodedAudio(clock_->NowTicks() - start_time_));

  // Video is sync'ed to audio.
  while (SendNextTranscodedVideo(audio_sent_ts_->GetTimestamp()));

  if (audio_bus_queue_.empty() && video_frame_queue_.empty()) {
    // Both queues being empty can only mean that we have reached
    // the end of the stream.
    LOG(INFO) << "Rewind.";
    Rewind();
  }

  // Schedule the next send.
  task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(
          &FakeMediaSource::SendNextFrame,
          base::Unretained(this)),
      base::TimeDelta::FromMilliseconds(kAudioFrameMs));
}

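// Timestamp helpers: video frame times come from the configured frame rate,
// audio frame times from the fixed 10 ms frame duration, ScaleTimestamp()
// compensates for a non-unity |playback_rate_|, and Rewind() seeks the
// demuxer back to the start of the file.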
base::TimeDelta FakeMediaSource::VideoFrameTime(int frame_number) {
  return frame_number * base::TimeDelta::FromSeconds(1) *
      video_frame_rate_denominator_ / video_frame_rate_numerator_;
}

base::TimeDelta FakeMediaSource::ScaleTimestamp(base::TimeDelta timestamp) {
  return base::TimeDelta::FromSecondsD(timestamp.InSecondsF() / playback_rate_);
}

base::TimeDelta FakeMediaSource::AudioFrameTime(int frame_number) {
  return frame_number * base::TimeDelta::FromMilliseconds(kAudioFrameMs);
}

void FakeMediaSource::Rewind() {
  CHECK(av_seek_frame(av_format_context_, -1, 0, AVSEEK_FLAG_BACKWARD) >= 0)
      << "Failed to rewind to the beginning.";
}

ScopedAVPacket FakeMediaSource::DemuxOnePacket(bool* audio) {
  ScopedAVPacket packet(new AVPacket());
  if (av_read_frame(av_format_context_, packet.get()) < 0) {
    VLOG(1) << "Failed to read one AVPacket.";
    packet.reset();
    return packet.Pass();
  }

  int stream_index = static_cast<int>(packet->stream_index);
  if (stream_index == audio_stream_index_) {
    *audio = true;
  } else if (stream_index == video_stream_index_) {
    *audio = false;
  } else {
    // Ignore unknown packet.
    LOG(INFO) << "Unknown packet.";
    packet.reset();
  }
  return packet.Pass();
}

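// Decodes one audio packet, enqueues the resulting buffers into the
// AudioRendererAlgorithm, then time-scales the buffered audio at
// |playback_rate_| and resamples it to 48 kHz, pushing fixed-size buses onto
// |audio_bus_queue_|.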
void FakeMediaSource::DecodeAudio(ScopedAVPacket packet) {
  // Audio.
  AVFrame* avframe = av_frame_alloc();

  // Make a shallow copy of packet so we can slide packet.data as frames are
  // decoded from the packet; otherwise av_free_packet() will corrupt memory.
  AVPacket packet_temp = *packet.get();

  do {
    int frame_decoded = 0;
    int result = avcodec_decode_audio4(
        av_audio_context(), avframe, &frame_decoded, &packet_temp);
    CHECK(result >= 0) << "Failed to decode audio.";
    packet_temp.size -= result;
    packet_temp.data += result;
    if (!frame_decoded)
      continue;

    int frames_read = avframe->nb_samples;
    if (frames_read < 0)
      break;

    if (!audio_sent_ts_) {
      // Initialize the base time to the first packet in the file.
      // This is set to the frequency we send to the receiver.
      // Not the frequency of the source file. This is because we
      // increment the frame count by samples we sent.
      audio_sent_ts_.reset(
          new AudioTimestampHelper(kAudioSamplingFrequency));
      // For some files this is an invalid value.
      base::TimeDelta base_ts;
      audio_sent_ts_->SetBaseTimestamp(base_ts);
    }

    scoped_refptr<AudioBuffer> buffer =
        AudioBuffer::CopyFrom(
            AVSampleFormatToSampleFormat(
                av_audio_context()->sample_fmt),
            ChannelLayoutToChromeChannelLayout(
                av_audio_context()->channel_layout,
                av_audio_context()->channels),
            av_audio_context()->channels,
            av_audio_context()->sample_rate,
            frames_read,
            &avframe->data[0],
            PtsToTimeDelta(avframe->pkt_pts, av_audio_stream()->time_base));
    audio_algo_.EnqueueBuffer(buffer);
    av_frame_unref(avframe);
  } while (packet_temp.size > 0);
  av_frame_free(&avframe);

  const int frames_needed_to_scale =
      playback_rate_ * av_audio_context()->sample_rate /
      kAudioPacketsPerSecond;
  while (frames_needed_to_scale <= audio_algo_.frames_buffered()) {
    if (!audio_algo_.FillBuffer(audio_fifo_input_bus_.get(), 0,
                                audio_fifo_input_bus_->frames(),
                                playback_rate_)) {
      // Nothing can be scaled. Decode some more.
      return;
    }

    // Prevent overflow of audio data in the FIFO.
    if (audio_fifo_input_bus_->frames() + audio_fifo_->frames()
        <= audio_fifo_->max_frames()) {
      audio_fifo_->Push(audio_fifo_input_bus_.get());
    } else {
      LOG(WARNING) << "Audio FIFO full; dropping samples.";
    }

    // Make sure there's enough data to resample audio.
    if (audio_fifo_->frames() <
        2 * audio_params_.sample_rate() / kAudioPacketsPerSecond) {
      continue;
    }

    scoped_ptr<media::AudioBus> resampled_bus(
        media::AudioBus::Create(
            audio_params_.channels(),
            kAudioSamplingFrequency / kAudioPacketsPerSecond));
    audio_resampler_->Resample(resampled_bus->frames(),
                               resampled_bus.get());
    audio_bus_queue_.push(resampled_bus.release());
  }
}

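// Decodes one video packet and wraps the resulting YV12 planes in a
// VideoFrame without copying. Timestamps are rebased against the first
// decoded PTS, and a backward jump in PTS (a rewound/looped stream)
// re-derives |video_first_pts_| so that timestamps stay monotonic.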
void FakeMediaSource::DecodeVideo(ScopedAVPacket packet) {
  // Video.
  int got_picture;
  AVFrame* avframe = av_frame_alloc();
  CHECK(avcodec_decode_video2(
      av_video_context(), avframe, &got_picture, packet.get()) >= 0)
      << "Video decode error.";
  if (!got_picture) {
    av_frame_free(&avframe);
    return;
  }
  gfx::Size size(av_video_context()->width, av_video_context()->height);

  if (!video_first_pts_set_) {
    video_first_pts_ = avframe->pkt_pts;
    video_first_pts_set_ = true;
  }
  const AVRational& time_base = av_video_stream()->time_base;
  base::TimeDelta timestamp =
      PtsToTimeDelta(avframe->pkt_pts - video_first_pts_, time_base);
  if (timestamp < last_video_frame_timestamp_) {
    // Stream has rewound. Rebase |video_first_pts_|.
    const AVRational& frame_rate = av_video_stream()->r_frame_rate;
    timestamp = last_video_frame_timestamp_ +
        (base::TimeDelta::FromSeconds(1) * frame_rate.den / frame_rate.num);
    const int64 adjustment_pts = TimeDeltaToPts(timestamp, time_base);
    video_first_pts_ = avframe->pkt_pts - adjustment_pts;
  }

  video_frame_queue_.push(
      VideoFrame::WrapExternalYuvData(
          media::VideoFrame::YV12,
          size,
          gfx::Rect(size),
          size,
          avframe->linesize[0],
          avframe->linesize[1],
          avframe->linesize[2],
          avframe->data[0],
          avframe->data[1],
          avframe->data[2],
          timestamp,
          base::Bind(&AVFreeFrame, avframe)));
  last_video_frame_timestamp_ = timestamp;
}

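// Demuxes and decodes packets until the requested queue (audio buses or
// video frames) is non-empty, or the end of the stream is reached.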
void FakeMediaSource::Decode(bool decode_audio) {
  // Read the stream until one audio bus or video frame can be decoded.
  while (true) {
    if (decode_audio && !audio_bus_queue_.empty())
      return;
    if (!decode_audio && !video_frame_queue_.empty())
      return;

    bool audio_packet = false;
    ScopedAVPacket packet = DemuxOnePacket(&audio_packet);
    if (!packet) {
      VLOG(1) << "End of stream.";
      return;
    }

    if (audio_packet)
      DecodeAudio(packet.Pass());
    else
      DecodeVideo(packet.Pass());
  }
}

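// MultiChannelResampler read callback: supplies buffered FIFO frames, or
// silence when the FIFO has fallen behind.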
void FakeMediaSource::ProvideData(int frame_delay,
                                  media::AudioBus* output_bus) {
  if (audio_fifo_->frames() >= output_bus->frames()) {
    audio_fifo_->Consume(output_bus, 0, output_bus->frames());
  } else {
    LOG(WARNING) << "Not enough audio data for resampling.";
    output_bus->Zero();
  }
}

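// The remaining methods expose the frames retained when |keep_frames_| is
// set and the underlying FFmpeg stream/codec state for the selected audio
// and video streams.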
scoped_refptr<media::VideoFrame>
FakeMediaSource::PopOldestInsertedVideoFrame() {
  CHECK(!inserted_video_frame_queue_.empty());
  scoped_refptr<media::VideoFrame> video_frame =
      inserted_video_frame_queue_.front();
  inserted_video_frame_queue_.pop();
  return video_frame;
}

AVStream* FakeMediaSource::av_audio_stream() {
  return av_format_context_->streams[audio_stream_index_];
}

AVStream* FakeMediaSource::av_video_stream() {
  return av_format_context_->streams[video_stream_index_];
}

AVCodecContext* FakeMediaSource::av_audio_context() {
  return av_audio_stream()->codec;
}

AVCodecContext* FakeMediaSource::av_video_context() {
  return av_video_stream()->codec;
}

}  // namespace cast
}  // namespace media