// media/filters/chunk_demuxer_unittest.cc (from the chromium-blink-merge.git repository)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <algorithm>
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/media_log.h"
16 #include "media/base/mock_demuxer_host.h"
17 #include "media/base/test_data_util.h"
18 #include "media/base/test_helpers.h"
19 #include "media/base/timestamp_constants.h"
20 #include "media/filters/chunk_demuxer.h"
21 #include "media/formats/webm/cluster_builder.h"
22 #include "media/formats/webm/webm_constants.h"
23 #include "testing/gtest/include/gtest/gtest.h"
25 using ::testing::AnyNumber;
26 using ::testing::Exactly;
27 using ::testing::InSequence;
28 using ::testing::NotNull;
29 using ::testing::Return;
30 using ::testing::SaveArg;
31 using ::testing::SetArgumentPointee;
32 using ::testing::_;
34 namespace media {
36 const uint8 kTracksHeader[] = {
37 0x16, 0x54, 0xAE, 0x6B, // Tracks ID
38 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
41 // WebM Block bytes that represent a VP8 key frame.
42 const uint8 kVP8Keyframe[] = {
43 0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
46 // WebM Block bytes that represent a VP8 interframe.
47 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
49 const uint8 kCuesHeader[] = {
50 0x1C, 0x53, 0xBB, 0x6B, // Cues ID
51 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
54 const uint8 kEncryptedMediaInitData[] = {
55 0x68, 0xFE, 0xF9, 0xA1, 0xB3, 0x0D, 0x6B, 0x4D,
56 0xF2, 0x22, 0xB5, 0x0B, 0x4D, 0xE9, 0xE9, 0x95,
59 const int kTracksHeaderSize = sizeof(kTracksHeader);
60 const int kTracksSizeOffset = 4;
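// Added note: kTracksSizeOffset points at the 8-byte size field that follows
// the 4-byte Tracks ID in kTracksHeader above; CreateInitSegmentInternal()
// later rewrites those 8 bytes (via WriteInt64()) with the actual size of the
// generated tracks payload.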
62 // The size field of the TrackEntry element in test file "webm_vorbis_track_entry"
63 // starts at index 1 and spans 8 bytes.
64 const int kAudioTrackSizeOffset = 1;
65 const int kAudioTrackSizeWidth = 8;
66 const int kAudioTrackEntryHeaderSize =
67 kAudioTrackSizeOffset + kAudioTrackSizeWidth;
69 // The size field of the TrackEntry element in test file "webm_vp8_track_entry"
70 // starts at index 1 and spans 8 bytes.
71 const int kVideoTrackSizeOffset = 1;
72 const int kVideoTrackSizeWidth = 8;
73 const int kVideoTrackEntryHeaderSize =
74 kVideoTrackSizeOffset + kVideoTrackSizeWidth;
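// Added note: each TrackEntry header is therefore a 1-byte ID followed by an
// 8-byte size field (9 bytes total). When CreateInitSegmentInternal() appends
// a ContentEncodings element to an encrypted track, it rewrites the size field
// to cover the combined payload, excluding these header bytes.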
76 const int kVideoTrackNum = 1;
77 const int kAudioTrackNum = 2;
78 const int kTextTrackNum = 3;
79 const int kAlternateTextTrackNum = 4;
81 const int kAudioBlockDuration = 23;
82 const int kVideoBlockDuration = 33;
83 const int kTextBlockDuration = 100;
84 const int kBlockSize = 10;
86 const char kSourceId[] = "SourceId";
87 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
88 const int kDefaultFirstClusterEndTimestamp = 66;
89 const int kDefaultSecondClusterEndTimestamp = 132;
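// Added note, derived from GenerateCluster() below: kDefaultFirstCluster()
// carries two audio blocks (0 and 23 ms, 23 ms each) and two video blocks
// (0 and 33 ms, 33 ms each), so the buffered ranges are audio [0,46) and
// video [0,66); their intersection is the reported "{ [0,46) }". The end
// timestamps 66 and 132 are the video-track end times of the first and
// second default clusters.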
91 base::TimeDelta kDefaultDuration() {
92 return base::TimeDelta::FromMilliseconds(201224);
95 // Write an integer into |buffer| in the form of a vint that spans 8 bytes.
96 // The data pointed to by |buffer| should be at least 8 bytes long.
97 // |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
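// Illustrative example (added, not in the original source): WriteInt64(buf,
// 0x1234) yields the bytes 0x01 0x00 0x00 0x00 0x00 0x00 0x12 0x34 -- a
// leading 0x01 length descriptor followed by the value in big-endian order
// across the remaining 7 bytes.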
98 static void WriteInt64(uint8* buffer, int64 number) {
99 DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
100 buffer[0] = 0x01;
101 int64 tmp = number;
102 for (int i = 7; i > 0; i--) {
103 buffer[i] = tmp & 0xff;
104 tmp >>= 8;
108 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
109 return arg.get() && !arg->end_of_stream() &&
110 arg->timestamp().InMilliseconds() == timestamp_in_ms;
113 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
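// Added note: these gmock matchers are used in read expectations further down,
// e.g. EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, HasTimestamp(0))) in
// ExpectRead() and ReadDone(DemuxerStream::kOk, IsEndOfStream()) in
// ExpectEndOfStream().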
115 static void OnReadDone(const base::TimeDelta& expected_time,
116 bool* called,
117 DemuxerStream::Status status,
118 const scoped_refptr<DecoderBuffer>& buffer) {
119 EXPECT_EQ(status, DemuxerStream::kOk);
120 EXPECT_EQ(expected_time, buffer->timestamp());
121 *called = true;
124 static void OnReadDone_AbortExpected(
125 bool* called, DemuxerStream::Status status,
126 const scoped_refptr<DecoderBuffer>& buffer) {
127 EXPECT_EQ(status, DemuxerStream::kAborted);
128 EXPECT_EQ(NULL, buffer.get());
129 *called = true;
132 static void OnReadDone_EOSExpected(bool* called,
133 DemuxerStream::Status status,
134 const scoped_refptr<DecoderBuffer>& buffer) {
135 EXPECT_EQ(status, DemuxerStream::kOk);
136 EXPECT_TRUE(buffer->end_of_stream());
137 *called = true;
140 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
141 EXPECT_EQ(status, PIPELINE_OK);
142 *called = true;
145 class ChunkDemuxerTest : public ::testing::Test {
146 protected:
147 enum CodecsIndex {
148 AUDIO,
149 VIDEO,
150 MAX_CODECS_INDEX
153 // Default cluster to append first for simple tests.
154 scoped_ptr<Cluster> kDefaultFirstCluster() {
155 return GenerateCluster(0, 4);
158 // Default cluster to append after kDefaultFirstCluster()
159 // has been appended. This cluster starts with blocks that
160 // have timestamps consistent with the end times of the blocks
161 // in kDefaultFirstCluster() so that these two clusters represent
162 // a continuous region.
163 scoped_ptr<Cluster> kDefaultSecondCluster() {
164 return GenerateCluster(46, 66, 5);
167 ChunkDemuxerTest()
168 : append_window_end_for_next_append_(kInfiniteDuration()) {
169 init_segment_received_cb_ =
170 base::Bind(&ChunkDemuxerTest::InitSegmentReceived,
171 base::Unretained(this));
172 CreateNewDemuxer();
175 void CreateNewDemuxer() {
176 base::Closure open_cb =
177 base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
178 Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb = base::Bind(
179 &ChunkDemuxerTest::OnEncryptedMediaInitData, base::Unretained(this));
180 demuxer_.reset(new ChunkDemuxer(open_cb, encrypted_media_init_data_cb,
181 scoped_refptr<MediaLog>(new MediaLog()),
182 true));
185 virtual ~ChunkDemuxerTest() {
186 ShutdownDemuxer();
189 void CreateInitSegment(int stream_flags,
190 bool is_audio_encrypted,
191 bool is_video_encrypted,
192 scoped_ptr<uint8[]>* buffer,
193 int* size) {
194 CreateInitSegmentInternal(
195 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
196 size);
199 void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
200 bool is_audio_encrypted,
201 bool is_video_encrypted,
202 scoped_ptr<uint8[]>* buffer,
203 int* size) {
204 DCHECK(stream_flags & HAS_TEXT);
205 CreateInitSegmentInternal(
206 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
207 size);
210 void CreateInitSegmentInternal(int stream_flags,
211 bool is_audio_encrypted,
212 bool is_video_encrypted,
213 scoped_ptr<uint8[]>* buffer,
214 bool use_alternate_text_track_id,
215 int* size) {
216 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
217 bool has_video = (stream_flags & HAS_VIDEO) != 0;
218 bool has_text = (stream_flags & HAS_TEXT) != 0;
219 scoped_refptr<DecoderBuffer> ebml_header;
220 scoped_refptr<DecoderBuffer> info;
221 scoped_refptr<DecoderBuffer> audio_track_entry;
222 scoped_refptr<DecoderBuffer> video_track_entry;
223 scoped_refptr<DecoderBuffer> audio_content_encodings;
224 scoped_refptr<DecoderBuffer> video_content_encodings;
225 scoped_refptr<DecoderBuffer> text_track_entry;
227 ebml_header = ReadTestDataFile("webm_ebml_element");
229 info = ReadTestDataFile("webm_info_element");
231 int tracks_element_size = 0;
233 if (has_audio) {
234 audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
235 tracks_element_size += audio_track_entry->data_size();
236 if (is_audio_encrypted) {
237 audio_content_encodings = ReadTestDataFile("webm_content_encodings");
238 tracks_element_size += audio_content_encodings->data_size();
242 if (has_video) {
243 video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
244 tracks_element_size += video_track_entry->data_size();
245 if (is_video_encrypted) {
246 video_content_encodings = ReadTestDataFile("webm_content_encodings");
247 tracks_element_size += video_content_encodings->data_size();
251 if (has_text) {
252 // TODO(matthewjheaney): create an abstraction to do
253 // this (http://crbug/321454).
254 // We need it to also handle the creation of multiple text tracks.
256 // This is the track entry for a text track,
257 // TrackEntry [AE], size=30
258 // TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
259 // TrackUID [73] [C5], size=1, value=3 (must remain constant for same
260 // track, even if TrackNum changes)
261 // TrackType [83], size=1, val=0x11
262 // CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
263 char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
264 "\x83\x81\x11\x86\x92"
265 "D_WEBVTT/SUBTITLES";
266 DCHECK_EQ(str[4], kTextTrackNum);
267 if (use_alternate_text_track_id)
268 str[4] = kAlternateTextTrackNum;
270 const int len = strlen(str);
271 DCHECK_EQ(len, 32);
272 const uint8* const buf = reinterpret_cast<const uint8*>(str);
273 text_track_entry = DecoderBuffer::CopyFrom(buf, len);
274 tracks_element_size += text_track_entry->data_size();
277 *size = ebml_header->data_size() + info->data_size() +
278 kTracksHeaderSize + tracks_element_size;
280 buffer->reset(new uint8[*size]);
282 uint8* buf = buffer->get();
283 memcpy(buf, ebml_header->data(), ebml_header->data_size());
284 buf += ebml_header->data_size();
286 memcpy(buf, info->data(), info->data_size());
287 buf += info->data_size();
289 memcpy(buf, kTracksHeader, kTracksHeaderSize);
290 WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
291 buf += kTracksHeaderSize;
293 // TODO(xhwang): Simplify this! Probably have test data files that contain
294 // ContentEncodings directly instead of trying to create one at run-time.
295 if (has_audio) {
296 memcpy(buf, audio_track_entry->data(),
297 audio_track_entry->data_size());
298 if (is_audio_encrypted) {
299 memcpy(buf + audio_track_entry->data_size(),
300 audio_content_encodings->data(),
301 audio_content_encodings->data_size());
302 WriteInt64(buf + kAudioTrackSizeOffset,
303 audio_track_entry->data_size() +
304 audio_content_encodings->data_size() -
305 kAudioTrackEntryHeaderSize);
306 buf += audio_content_encodings->data_size();
308 buf += audio_track_entry->data_size();
311 if (has_video) {
312 memcpy(buf, video_track_entry->data(),
313 video_track_entry->data_size());
314 if (is_video_encrypted) {
315 memcpy(buf + video_track_entry->data_size(),
316 video_content_encodings->data(),
317 video_content_encodings->data_size());
318 WriteInt64(buf + kVideoTrackSizeOffset,
319 video_track_entry->data_size() +
320 video_content_encodings->data_size() -
321 kVideoTrackEntryHeaderSize);
322 buf += video_content_encodings->data_size();
324 buf += video_track_entry->data_size();
327 if (has_text) {
328 memcpy(buf, text_track_entry->data(),
329 text_track_entry->data_size());
330 buf += text_track_entry->data_size();
334 ChunkDemuxer::Status AddId() {
335 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
338 ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
339 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
340 bool has_video = (stream_flags & HAS_VIDEO) != 0;
341 std::vector<std::string> codecs;
342 std::string type;
344 if (has_audio) {
345 codecs.push_back("vorbis");
346 type = "audio/webm";
349 if (has_video) {
350 codecs.push_back("vp8");
351 type = "video/webm";
354 if (!has_audio && !has_video) {
355 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
358 return demuxer_->AddId(source_id, type, codecs);
361 ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
362 std::vector<std::string> codecs;
363 std::string type = "video/mp2t";
364 codecs.push_back("mp4a.40.2");
365 codecs.push_back("avc1.640028");
366 return demuxer_->AddId(source_id, type, codecs);
369 void AppendData(const uint8* data, size_t length) {
370 AppendData(kSourceId, data, length);
373 void AppendCluster(const std::string& source_id,
374 scoped_ptr<Cluster> cluster) {
375 AppendData(source_id, cluster->data(), cluster->size());
378 void AppendCluster(scoped_ptr<Cluster> cluster) {
379 AppendCluster(kSourceId, cluster.Pass());
382 void AppendCluster(int timecode, int block_count) {
383 AppendCluster(GenerateCluster(timecode, block_count));
386 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
387 int timecode, int block_count) {
388 int block_duration = 0;
389 switch (track_number) {
390 case kVideoTrackNum:
391 block_duration = kVideoBlockDuration;
392 break;
393 case kAudioTrackNum:
394 block_duration = kAudioBlockDuration;
395 break;
396 case kTextTrackNum: // Fall-through.
397 case kAlternateTextTrackNum:
398 block_duration = kTextBlockDuration;
399 break;
401 ASSERT_NE(block_duration, 0);
402 int end_timecode = timecode + block_count * block_duration;
403 AppendCluster(source_id,
404 GenerateSingleStreamCluster(
405 timecode, end_timecode, track_number, block_duration));
408 struct BlockInfo {
409 BlockInfo()
410 : track_number(0),
411 timestamp_in_ms(0),
412 flags(0),
413 duration(0) {
416 BlockInfo(int tn, int ts, int f, int d)
417 : track_number(tn),
418 timestamp_in_ms(ts),
419 flags(f),
420 duration(d) {
423 int track_number;
424 int timestamp_in_ms;
425 int flags;
426 int duration;
428 bool operator< (const BlockInfo& rhs) const {
429 return timestamp_in_ms < rhs.timestamp_in_ms;
433 // |track_number| - The track number to place in each parsed BlockInfo.
434 // |block_descriptions| - A space-delimited string of block info that
435 // is used to populate |blocks|. Each block info is a timestamp in
436 // milliseconds, optionally followed by a 'K' to indicate that the block
437 // should be marked as a key frame. For example "0K 30 60" should populate
438 // |blocks| with 3 BlockInfo objects: a key frame with timestamp 0 and 2
439 // non-key-frames at 30ms and 60ms.
440 void ParseBlockDescriptions(int track_number,
441 const std::string block_descriptions,
442 std::vector<BlockInfo>* blocks) {
443 std::vector<std::string> timestamps = base::SplitString(
444 block_descriptions, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
446 for (size_t i = 0; i < timestamps.size(); ++i) {
447 std::string timestamp_str = timestamps[i];
448 BlockInfo block_info;
449 block_info.track_number = track_number;
450 block_info.flags = 0;
451 block_info.duration = 0;
453 if (base::EndsWith(timestamp_str, "K", base::CompareCase::SENSITIVE)) {
454 block_info.flags = kWebMFlagKeyframe;
455 // Remove the "K" off of the token.
456 timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
458 CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
460 if (track_number == kTextTrackNum ||
461 track_number == kAlternateTextTrackNum) {
462 block_info.duration = kTextBlockDuration;
463 ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
464 << "Text block with timestamp " << block_info.timestamp_in_ms
465 << " was not marked as a key frame."
466 << " All text blocks must be key frames";
469 if (track_number == kAudioTrackNum)
470 ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
472 blocks->push_back(block_info);
476 scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
477 bool unknown_size) {
478 DCHECK_GT(blocks.size(), 0u);
479 ClusterBuilder cb;
481 std::vector<uint8> data(10);
482 for (size_t i = 0; i < blocks.size(); ++i) {
483 if (i == 0)
484 cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
486 if (blocks[i].duration) {
487 if (blocks[i].track_number == kVideoTrackNum) {
488 AddVideoBlockGroup(&cb,
489 blocks[i].track_number, blocks[i].timestamp_in_ms,
490 blocks[i].duration, blocks[i].flags);
491 } else {
492 cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
493 blocks[i].duration, blocks[i].flags,
494 &data[0], data.size());
496 } else {
497 cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
498 blocks[i].flags,
499 &data[0], data.size());
503 return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
506 scoped_ptr<Cluster> GenerateCluster(
507 std::priority_queue<BlockInfo> block_queue,
508 bool unknown_size) {
509 std::vector<BlockInfo> blocks(block_queue.size());
510 for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
511 blocks[i] = block_queue.top();
512 block_queue.pop();
515 return GenerateCluster(blocks, unknown_size);
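// Added note on the overload above: BlockInfo::operator< compares timestamps
// and std::priority_queue is a max-heap, so popping yields blocks from the
// highest timestamp down while |blocks| is filled from the back; the generated
// cluster therefore contains the blocks in ascending timestamp order.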
518 // |block_descriptions| - The block descriptions used to construct the
519 // cluster. See the documentation for ParseBlockDescriptions() for details on
520 // the string format.
521 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
522 const std::string& block_descriptions) {
523 std::vector<BlockInfo> blocks;
524 ParseBlockDescriptions(track_number, block_descriptions, &blocks);
525 AppendCluster(source_id, GenerateCluster(blocks, false));
528 struct MuxedStreamInfo {
529 MuxedStreamInfo()
530 : track_number(0),
531 block_descriptions("")
534 MuxedStreamInfo(int track_num, const char* block_desc)
535 : track_number(track_num),
536 block_descriptions(block_desc) {
539 int track_number;
540 // The block description passed to ParseBlockDescriptions().
541 // See the documentation for that method for details on the string format.
542 const char* block_descriptions;
545 void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
546 const MuxedStreamInfo& msi_2) {
547 std::vector<MuxedStreamInfo> msi(2);
548 msi[0] = msi_1;
549 msi[1] = msi_2;
550 AppendMuxedCluster(msi);
553 void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
554 const MuxedStreamInfo& msi_2,
555 const MuxedStreamInfo& msi_3) {
556 std::vector<MuxedStreamInfo> msi(3);
557 msi[0] = msi_1;
558 msi[1] = msi_2;
559 msi[2] = msi_3;
560 AppendMuxedCluster(msi);
563 void AppendMuxedCluster(const std::vector<MuxedStreamInfo> msi) {
564 std::priority_queue<BlockInfo> block_queue;
565 for (size_t i = 0; i < msi.size(); ++i) {
566 std::vector<BlockInfo> track_blocks;
567 ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
568 &track_blocks);
570 for (size_t j = 0; j < track_blocks.size(); ++j)
571 block_queue.push(track_blocks[j]);
574 AppendCluster(kSourceId, GenerateCluster(block_queue, false));
577 void AppendData(const std::string& source_id,
578 const uint8* data, size_t length) {
579 EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
581 demuxer_->AppendData(source_id, data, length,
582 append_window_start_for_next_append_,
583 append_window_end_for_next_append_,
584 &timestamp_offset_map_[source_id],
585 init_segment_received_cb_);
588 void AppendDataInPieces(const uint8* data, size_t length) {
589 AppendDataInPieces(data, length, 7);
592 void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
593 const uint8* start = data;
594 const uint8* end = data + length;
595 while (start < end) {
596 size_t append_size = std::min(piece_size,
597 static_cast<size_t>(end - start));
598 AppendData(start, append_size);
599 start += append_size;
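// Added note on AppendDataInPieces() above: the default 7-byte piece size
// splits each append into many small chunks, exercising the demuxer's handling
// of media data that arrives incrementally rather than in one contiguous
// buffer.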
603 void AppendInitSegment(int stream_flags) {
604 AppendInitSegmentWithSourceId(kSourceId, stream_flags);
607 void AppendInitSegmentWithSourceId(const std::string& source_id,
608 int stream_flags) {
609 AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
612 void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
613 int stream_flags,
614 bool is_audio_encrypted,
615 bool is_video_encrypted) {
616 scoped_ptr<uint8[]> info_tracks;
617 int info_tracks_size = 0;
618 CreateInitSegment(stream_flags,
619 is_audio_encrypted, is_video_encrypted,
620 &info_tracks, &info_tracks_size);
621 AppendData(source_id, info_tracks.get(), info_tracks_size);
624 void AppendGarbage() {
625 // Fill up an array with gibberish.
626 int garbage_cluster_size = 10;
627 scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
628 for (int i = 0; i < garbage_cluster_size; ++i)
629 garbage_cluster[i] = i;
630 AppendData(garbage_cluster.get(), garbage_cluster_size);
633 void InitDoneCalled(PipelineStatus expected_status,
634 PipelineStatus status) {
635 EXPECT_EQ(status, expected_status);
638 void AppendEmptyCluster(int timecode) {
639 AppendCluster(GenerateEmptyCluster(timecode));
642 PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
643 PipelineStatus expected_status) {
644 if (expected_duration != kNoTimestamp())
645 EXPECT_CALL(host_, SetDuration(expected_duration));
646 return CreateInitDoneCB(expected_status);
649 PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
650 return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
651 base::Unretained(this),
652 expected_status);
655 enum StreamFlags {
656 HAS_AUDIO = 1 << 0,
657 HAS_VIDEO = 1 << 1,
658 HAS_TEXT = 1 << 2
661 bool InitDemuxer(int stream_flags) {
662 return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
665 bool InitDemuxerWithEncryptionInfo(
666 int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
668 PipelineStatus expected_status =
669 (stream_flags != 0) ? PIPELINE_OK : PIPELINE_ERROR_DECODE;
671 base::TimeDelta expected_duration = kNoTimestamp();
672 if (expected_status == PIPELINE_OK)
673 expected_duration = kDefaultDuration();
675 EXPECT_CALL(*this, DemuxerOpened());
677 // Adding expectation prior to CreateInitDoneCB() here because InSequence
678 // tests require init segment received before duration set. Also, only
679 // expect an init segment received callback if there is actually a track in
680 // it.
681 if (stream_flags != 0)
682 EXPECT_CALL(*this, InitSegmentReceived());
684 demuxer_->Initialize(
685 &host_, CreateInitDoneCB(expected_duration, expected_status), true);
687 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
688 return false;
690 AppendInitSegmentWithEncryptedInfo(
691 kSourceId, stream_flags,
692 is_audio_encrypted, is_video_encrypted);
693 return true;
696 bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
697 const std::string& video_id,
698 bool has_text) {
699 EXPECT_CALL(*this, DemuxerOpened());
700 demuxer_->Initialize(
701 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
703 if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
704 return false;
705 if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
706 return false;
708 int audio_flags = HAS_AUDIO;
709 int video_flags = HAS_VIDEO;
711 if (has_text) {
712 audio_flags |= HAS_TEXT;
713 video_flags |= HAS_TEXT;
716 EXPECT_CALL(*this, InitSegmentReceived());
717 AppendInitSegmentWithSourceId(audio_id, audio_flags);
718 EXPECT_CALL(*this, InitSegmentReceived());
719 AppendInitSegmentWithSourceId(video_id, video_flags);
720 return true;
723 bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
724 const std::string& video_id) {
725 return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
728 // Initializes the demuxer with data from 2 files with different
729 // decoder configurations. This is used to test the decoder config change
730 // logic.
732 // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
733 // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
734 // The resulting video stream returns data from each file for the following
735 // time ranges.
736 // bear-320x240.webm : [0-501) [801-2736)
737 // bear-640x360.webm : [527-793)
739 // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
740 // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
741 // The resulting audio stream returns data from each file for the following
742 // time ranges.
743 // bear-320x240.webm : [0-524) [779-2736)
744 // bear-640x360.webm : [527-759)
745 bool InitDemuxerWithConfigChangeData() {
746 scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
747 scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
749 EXPECT_CALL(*this, DemuxerOpened());
751 // Adding expectation prior to CreateInitDoneCB() here because InSequence
752 // tests require init segment received before duration set.
753 EXPECT_CALL(*this, InitSegmentReceived());
754 demuxer_->Initialize(
755 &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
756 PIPELINE_OK), true);
758 if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
759 return false;
761 // Append the whole bear1 file.
762 // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
763 // the files are fixed to have the correct duration in their init segments,
764 // and the CreateInitDoneCB() call, above, is fixed to used that duration.
765 // See http://crbug.com/354284.
766 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
767 AppendData(bear1->data(), bear1->data_size());
768 // Last audio frame has timestamp 2721 and duration 24 (estimated from max
769 // seen so far for audio track).
770 // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
771 // DefaultDuration for video track).
772 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
774 // Append initialization segment for bear2.
775 // Note: Offsets here and below are derived from
776 // media/test/data/bear-640x360-manifest.js and
777 // media/test/data/bear-320x240-manifest.js which were
778 // generated from media/test/data/bear-640x360.webm and
779 // media/test/data/bear-320x240.webm respectively.
780 EXPECT_CALL(*this, InitSegmentReceived());
781 AppendData(bear2->data(), 4340);
783 // Append a media segment that goes from [0.527000, 1.014000).
784 AppendData(bear2->data() + 55290, 18785);
785 CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
787 // Append initialization segment for bear1 & fill gap with [779-1197)
788 // segment.
789 EXPECT_CALL(*this, InitSegmentReceived());
790 AppendData(bear1->data(), 4370);
791 AppendData(bear1->data() + 72737, 28183);
792 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
794 MarkEndOfStream(PIPELINE_OK);
795 return true;
798 void ShutdownDemuxer() {
799 if (demuxer_) {
800 demuxer_->Shutdown();
801 message_loop_.RunUntilIdle();
805 void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
806 uint8 data[] = { 0x00 };
807 cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
810 scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
811 return GenerateCluster(timecode, timecode, block_count);
814 void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
815 int duration, int flags) {
816 const uint8* data =
817 (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
818 int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
819 sizeof(kVP8Interframe);
820 cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
823 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
824 int first_video_timecode,
825 int block_count) {
826 return GenerateCluster(first_audio_timecode, first_video_timecode,
827 block_count, false);
829 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
830 int first_video_timecode,
831 int block_count,
832 bool unknown_size) {
833 CHECK_GT(block_count, 0);
835 std::priority_queue<BlockInfo> block_queue;
837 if (block_count == 1) {
838 block_queue.push(BlockInfo(kAudioTrackNum,
839 first_audio_timecode,
840 kWebMFlagKeyframe,
841 kAudioBlockDuration));
842 return GenerateCluster(block_queue, unknown_size);
845 int audio_timecode = first_audio_timecode;
846 int video_timecode = first_video_timecode;
848 // Create simple blocks for everything except the last 2 blocks.
849 // The first video frame must be a key frame.
850 uint8 video_flag = kWebMFlagKeyframe;
851 for (int i = 0; i < block_count - 2; i++) {
852 if (audio_timecode <= video_timecode) {
853 block_queue.push(BlockInfo(kAudioTrackNum,
854 audio_timecode,
855 kWebMFlagKeyframe,
856 0));
857 audio_timecode += kAudioBlockDuration;
858 continue;
861 block_queue.push(BlockInfo(kVideoTrackNum,
862 video_timecode,
863 video_flag,
864 0));
865 video_timecode += kVideoBlockDuration;
866 video_flag = 0;
869 // Make the last 2 blocks BlockGroups so that they don't get delayed by the
870 // block duration calculation logic.
871 block_queue.push(BlockInfo(kAudioTrackNum,
872 audio_timecode,
873 kWebMFlagKeyframe,
874 kAudioBlockDuration));
875 block_queue.push(BlockInfo(kVideoTrackNum,
876 video_timecode,
877 video_flag,
878 kVideoBlockDuration));
880 return GenerateCluster(block_queue, unknown_size);
883 scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
884 int end_timecode,
885 int track_number,
886 int block_duration) {
887 CHECK_GT(end_timecode, timecode);
889 std::vector<uint8> data(kBlockSize);
891 ClusterBuilder cb;
892 cb.SetClusterTimecode(timecode);
894 // Create simple blocks for everything except the last block.
895 while (timecode < (end_timecode - block_duration)) {
896 cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
897 &data[0], data.size());
898 timecode += block_duration;
901 if (track_number == kVideoTrackNum) {
902 AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
903 kWebMFlagKeyframe);
904 } else {
905 cb.AddBlockGroup(track_number, timecode, block_duration,
906 kWebMFlagKeyframe, &data[0], data.size());
909 return cb.Finish();
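// Added note on GenerateSingleStreamCluster() above: as with GenerateCluster(),
// the final block is written as a BlockGroup with an explicit duration so it
// is not held back by the parser's block-duration estimation logic.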
912 void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
913 demuxer_->GetStream(type)->Read(read_cb);
914 message_loop_.RunUntilIdle();
917 void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
918 Read(DemuxerStream::AUDIO, read_cb);
921 void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
922 Read(DemuxerStream::VIDEO, read_cb);
925 void GenerateExpectedReads(int timecode, int block_count) {
926 GenerateExpectedReads(timecode, timecode, block_count);
929 void GenerateExpectedReads(int start_audio_timecode,
930 int start_video_timecode,
931 int block_count) {
932 CHECK_GT(block_count, 0);
934 if (block_count == 1) {
935 ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
936 return;
939 int audio_timecode = start_audio_timecode;
940 int video_timecode = start_video_timecode;
942 for (int i = 0; i < block_count; i++) {
943 if (audio_timecode <= video_timecode) {
944 ExpectRead(DemuxerStream::AUDIO, audio_timecode);
945 audio_timecode += kAudioBlockDuration;
946 continue;
949 ExpectRead(DemuxerStream::VIDEO, video_timecode);
950 video_timecode += kVideoBlockDuration;
954 void GenerateSingleStreamExpectedReads(int timecode,
955 int block_count,
956 DemuxerStream::Type type,
957 int block_duration) {
958 CHECK_GT(block_count, 0);
959 int stream_timecode = timecode;
961 for (int i = 0; i < block_count; i++) {
962 ExpectRead(type, stream_timecode);
963 stream_timecode += block_duration;
967 void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
968 GenerateSingleStreamExpectedReads(
969 timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
972 void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
973 GenerateSingleStreamExpectedReads(
974 timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
977 scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
978 ClusterBuilder cb;
979 cb.SetClusterTimecode(timecode);
980 return cb.Finish();
983 void CheckExpectedRanges(const std::string& expected) {
984 CheckExpectedRanges(kSourceId, expected);
987 void CheckExpectedRanges(const std::string& id,
988 const std::string& expected) {
989 CheckExpectedRanges(demuxer_->GetBufferedRanges(id), expected);
992 void CheckExpectedRanges(DemuxerStream::Type type,
993 const std::string& expected) {
994 ChunkDemuxerStream* stream =
995 static_cast<ChunkDemuxerStream*>(demuxer_->GetStream(type));
996 CheckExpectedRanges(stream->GetBufferedRanges(kDefaultDuration()),
997 expected);
1000 void CheckExpectedRanges(const Ranges<base::TimeDelta>& r,
1001 const std::string& expected) {
1002 std::stringstream ss;
1003 ss << "{ ";
1004 for (size_t i = 0; i < r.size(); ++i) {
1005 ss << "[" << r.start(i).InMilliseconds() << ","
1006 << r.end(i).InMilliseconds() << ") ";
1008 ss << "}";
1009 EXPECT_EQ(expected, ss.str());
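// Added note on CheckExpectedRanges() above: the expected string lists each
// buffered range as "[start_ms,end_ms)" separated by spaces and wrapped in
// braces, e.g. a hypothetical "{ [0,46) [70,100) }" for two disjoint ranges.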
1012 MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
1013 const scoped_refptr<DecoderBuffer>&));
1015 void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
1016 scoped_refptr<DecoderBuffer>* buffer_out,
1017 DemuxerStream::Status status,
1018 const scoped_refptr<DecoderBuffer>& buffer) {
1019 *status_out = status;
1020 *buffer_out = buffer;
1023 void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
1024 DemuxerStream::Status* status,
1025 base::TimeDelta* last_timestamp) {
1026 DemuxerStream* stream = demuxer_->GetStream(type);
1027 scoped_refptr<DecoderBuffer> buffer;
1029 *last_timestamp = kNoTimestamp();
1030 do {
1031 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1032 base::Unretained(this), status, &buffer));
1033 base::MessageLoop::current()->RunUntilIdle();
1034 if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
1035 *last_timestamp = buffer->timestamp();
1036 } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
1039 void ExpectEndOfStream(DemuxerStream::Type type) {
1040 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
1041 demuxer_->GetStream(type)->Read(base::Bind(
1042 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1043 message_loop_.RunUntilIdle();
1046 void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
1047 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
1048 HasTimestamp(timestamp_in_ms)));
1049 demuxer_->GetStream(type)->Read(base::Bind(
1050 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1051 message_loop_.RunUntilIdle();
1054 void ExpectConfigChanged(DemuxerStream::Type type) {
1055 EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
1056 demuxer_->GetStream(type)->Read(base::Bind(
1057 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1058 message_loop_.RunUntilIdle();
1061 void CheckExpectedBuffers(DemuxerStream* stream,
1062 const std::string& expected) {
1063 std::vector<std::string> timestamps = base::SplitString(
1064 expected, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
1065 std::stringstream ss;
1066 for (size_t i = 0; i < timestamps.size(); ++i) {
1067 // Initialize status to kAborted since it's possible for Read() to return
1068 // without calling StoreStatusAndBuffer() if it doesn't have any buffers
1069 // left to return.
1070 DemuxerStream::Status status = DemuxerStream::kAborted;
1071 scoped_refptr<DecoderBuffer> buffer;
1072 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1073 base::Unretained(this), &status, &buffer));
1074 base::MessageLoop::current()->RunUntilIdle();
1075 if (status != DemuxerStream::kOk || buffer->end_of_stream())
1076 break;
1078 if (i > 0)
1079 ss << " ";
1080 ss << buffer->timestamp().InMilliseconds();
1082 if (buffer->is_key_frame())
1083 ss << "K";
1085 // Handle preroll buffers.
1086 if (base::EndsWith(timestamps[i], "P", base::CompareCase::SENSITIVE)) {
1087 ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
1088 ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
1089 ss << "P";
1092 EXPECT_EQ(expected, ss.str());
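// Added note on CheckExpectedBuffers() above: the expected string is a
// space-separated list of buffer timestamps in milliseconds, each optionally
// suffixed with "K" for a key frame and/or "P" for a preroll buffer (one whose
// discard_padding().first is kInfiniteDuration()), mirroring how |ss| is
// assembled above.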
1095 MOCK_METHOD1(Checkpoint, void(int id));
1097 struct BufferTimestamps {
1098 int video_time_ms;
1099 int audio_time_ms;
1101 static const int kSkip = -1;
1103 // Test parsing a WebM file.
1104 // |filename| - The name of the file in media/test/data to parse.
1105 // |timestamps| - The expected timestamps on the parsed buffers.
1106 // a timestamp of kSkip indicates that a Read() call for that stream
1107 // shouldn't be made on that iteration of the loop. If both streams have
1108 // a kSkip then the loop will terminate.
1109 bool ParseWebMFile(const std::string& filename,
1110 const BufferTimestamps* timestamps,
1111 const base::TimeDelta& duration) {
1112 return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
1115 bool ParseWebMFile(const std::string& filename,
1116 const BufferTimestamps* timestamps,
1117 const base::TimeDelta& duration,
1118 int stream_flags) {
1119 EXPECT_CALL(*this, DemuxerOpened());
1120 demuxer_->Initialize(
1121 &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
1123 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
1124 return false;
1126 // Read a WebM file into memory and send the data to the demuxer.
1127 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
1128 EXPECT_CALL(*this, InitSegmentReceived());
1129 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
1131 // Verify that the timestamps on the first few packets match what we
1132 // expect.
1133 for (size_t i = 0;
1134 (timestamps[i].audio_time_ms != kSkip ||
1135 timestamps[i].video_time_ms != kSkip);
1136 i++) {
1137 bool audio_read_done = false;
1138 bool video_read_done = false;
1140 if (timestamps[i].audio_time_ms != kSkip) {
1141 ReadAudio(base::Bind(&OnReadDone,
1142 base::TimeDelta::FromMilliseconds(
1143 timestamps[i].audio_time_ms),
1144 &audio_read_done));
1145 EXPECT_TRUE(audio_read_done);
1148 if (timestamps[i].video_time_ms != kSkip) {
1149 ReadVideo(base::Bind(&OnReadDone,
1150 base::TimeDelta::FromMilliseconds(
1151 timestamps[i].video_time_ms),
1152 &video_read_done));
1153 EXPECT_TRUE(video_read_done);
1157 return true;
1160 MOCK_METHOD0(DemuxerOpened, void());
1161 MOCK_METHOD2(OnEncryptedMediaInitData,
1162 void(EmeInitDataType init_data_type,
1163 const std::vector<uint8>& init_data));
1165 MOCK_METHOD0(InitSegmentReceived, void(void));
1167 void Seek(base::TimeDelta seek_time) {
1168 demuxer_->StartWaitingForSeek(seek_time);
1169 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
1170 message_loop_.RunUntilIdle();
1173 void MarkEndOfStream(PipelineStatus status) {
1174 demuxer_->MarkEndOfStream(status);
1175 message_loop_.RunUntilIdle();
1178 bool SetTimestampOffset(const std::string& id,
1179 base::TimeDelta timestamp_offset) {
1180 if (demuxer_->IsParsingMediaSegment(id))
1181 return false;
1183 timestamp_offset_map_[id] = timestamp_offset;
1184 return true;
1187 base::MessageLoop message_loop_;
1188 MockDemuxerHost host_;
1190 scoped_ptr<ChunkDemuxer> demuxer_;
1191 ChunkDemuxer::InitSegmentReceivedCB init_segment_received_cb_;
1193 base::TimeDelta append_window_start_for_next_append_;
1194 base::TimeDelta append_window_end_for_next_append_;
1196 // Map of source id to timestamp offset to use for the next AppendData()
1197 // operation for that source id.
1198 std::map<std::string, base::TimeDelta> timestamp_offset_map_;
1200 private:
1201 DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
1204 TEST_F(ChunkDemuxerTest, Init) {
1205 // Test no streams, audio-only, video-only, and audio & video scenarios.
1206 // Audio and video streams can be encrypted or not encrypted.
1207 for (int i = 0; i < 16; i++) {
1208 bool has_audio = (i & 0x1) != 0;
1209 bool has_video = (i & 0x2) != 0;
1210 bool is_audio_encrypted = (i & 0x4) != 0;
1211 bool is_video_encrypted = (i & 0x8) != 0;
1213 // Skip invalid combinations (an encrypted flag without the corresponding stream).
1214 if ((!has_audio && is_audio_encrypted) ||
1215 (!has_video && is_video_encrypted)) {
1216 continue;
1219 CreateNewDemuxer();
1221 if (is_audio_encrypted || is_video_encrypted) {
1222 int need_key_count = (is_audio_encrypted ? 1 : 0) +
1223 (is_video_encrypted ? 1 : 0);
1224 EXPECT_CALL(*this, OnEncryptedMediaInitData(
1225 EmeInitDataType::WEBM,
1226 std::vector<uint8>(
1227 kEncryptedMediaInitData,
1228 kEncryptedMediaInitData +
1229 arraysize(kEncryptedMediaInitData))))
1230 .Times(Exactly(need_key_count));
1233 int stream_flags = 0;
1234 if (has_audio)
1235 stream_flags |= HAS_AUDIO;
1237 if (has_video)
1238 stream_flags |= HAS_VIDEO;
1240 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1241 stream_flags, is_audio_encrypted, is_video_encrypted));
1243 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1244 if (has_audio) {
1245 ASSERT_TRUE(audio_stream);
1247 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1248 EXPECT_EQ(kCodecVorbis, config.codec());
1249 EXPECT_EQ(32, config.bits_per_channel());
1250 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1251 EXPECT_EQ(44100, config.samples_per_second());
1252 EXPECT_TRUE(config.extra_data());
1253 EXPECT_GT(config.extra_data_size(), 0u);
1254 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1255 EXPECT_EQ(is_audio_encrypted,
1256 audio_stream->audio_decoder_config().is_encrypted());
1257 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1258 ->supports_partial_append_window_trimming());
1259 } else {
1260 EXPECT_FALSE(audio_stream);
1263 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1264 if (has_video) {
1265 EXPECT_TRUE(video_stream);
1266 EXPECT_EQ(is_video_encrypted,
1267 video_stream->video_decoder_config().is_encrypted());
1268 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1269 ->supports_partial_append_window_trimming());
1270 } else {
1271 EXPECT_FALSE(video_stream);
1274 ShutdownDemuxer();
1275 demuxer_.reset();
1279 // TODO(acolwell): Fold this test into Init tests since the tests are
1280 // almost identical.
1281 TEST_F(ChunkDemuxerTest, InitText) {
1282 // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
1283 // No encryption cases handled here.
1284 bool has_video = true;
1285 bool is_audio_encrypted = false;
1286 bool is_video_encrypted = false;
1287 for (int i = 0; i < 2; i++) {
1288 bool has_audio = (i & 0x1) != 0;
1290 CreateNewDemuxer();
1292 DemuxerStream* text_stream = NULL;
1293 TextTrackConfig text_config;
1294 EXPECT_CALL(host_, AddTextStream(_, _))
1295 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1296 SaveArg<1>(&text_config)));
1298 int stream_flags = HAS_TEXT;
1299 if (has_audio)
1300 stream_flags |= HAS_AUDIO;
1302 if (has_video)
1303 stream_flags |= HAS_VIDEO;
1305 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1306 stream_flags, is_audio_encrypted, is_video_encrypted));
1307 ASSERT_TRUE(text_stream);
1308 EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1309 EXPECT_EQ(kTextSubtitles, text_config.kind());
1310 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1311 ->supports_partial_append_window_trimming());
1313 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1314 if (has_audio) {
1315 ASSERT_TRUE(audio_stream);
1317 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1318 EXPECT_EQ(kCodecVorbis, config.codec());
1319 EXPECT_EQ(32, config.bits_per_channel());
1320 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1321 EXPECT_EQ(44100, config.samples_per_second());
1322 EXPECT_TRUE(config.extra_data());
1323 EXPECT_GT(config.extra_data_size(), 0u);
1324 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1325 EXPECT_EQ(is_audio_encrypted,
1326 audio_stream->audio_decoder_config().is_encrypted());
1327 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1328 ->supports_partial_append_window_trimming());
1329 } else {
1330 EXPECT_FALSE(audio_stream);
1333 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1334 if (has_video) {
1335 EXPECT_TRUE(video_stream);
1336 EXPECT_EQ(is_video_encrypted,
1337 video_stream->video_decoder_config().is_encrypted());
1338 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1339 ->supports_partial_append_window_trimming());
1340 } else {
1341 EXPECT_FALSE(video_stream);
1344 ShutdownDemuxer();
1345 demuxer_.reset();
1349 TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
1350 // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
1351 // segment in which the text track ID changes. Verify appended buffers before
1352 // and after the second init segment map to the same underlying track buffers.
1353 CreateNewDemuxer();
1354 DemuxerStream* text_stream = NULL;
1355 TextTrackConfig text_config;
1356 EXPECT_CALL(host_, AddTextStream(_, _))
1357 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1358 SaveArg<1>(&text_config)));
1359 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1360 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1361 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1362 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1363 ASSERT_TRUE(audio_stream);
1364 ASSERT_TRUE(video_stream);
1365 ASSERT_TRUE(text_stream);
1367 AppendMuxedCluster(
1368 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1369 MuxedStreamInfo(kVideoTrackNum, "0K 30"),
1370 MuxedStreamInfo(kTextTrackNum, "10K"));
1371 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1373 scoped_ptr<uint8[]> info_tracks;
1374 int info_tracks_size = 0;
1375 CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
1376 false, false,
1377 &info_tracks, &info_tracks_size);
1378 EXPECT_CALL(*this, InitSegmentReceived());
1379 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1380 append_window_start_for_next_append_,
1381 append_window_end_for_next_append_,
1382 &timestamp_offset_map_[kSourceId],
1383 init_segment_received_cb_);
1385 AppendMuxedCluster(
1386 MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1387 MuxedStreamInfo(kVideoTrackNum, "60K"),
1388 MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
1390 CheckExpectedRanges(kSourceId, "{ [0,92) }");
1391 CheckExpectedBuffers(audio_stream, "0K 23K 46K 69K");
1392 CheckExpectedBuffers(video_stream, "0K 30 60K");
1393 CheckExpectedBuffers(text_stream, "10K 45K");
1395 ShutdownDemuxer();
1398 TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
1399 // Tests that non-key-frames following an init segment are allowed
1400 // and dropped, as expected if the initialization segment received
1401 // algorithm correctly sets the needs random access point flag to true for all
1402 // track buffers. Note that the first initialization segment is insufficient
1403 // to fully test this since needs random access point flag initializes to
1404 // true.
1405 CreateNewDemuxer();
1406 DemuxerStream* text_stream = NULL;
1407 EXPECT_CALL(host_, AddTextStream(_, _))
1408 .WillOnce(SaveArg<0>(&text_stream));
1409 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1410 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1411 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1412 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1413 ASSERT_TRUE(audio_stream && video_stream && text_stream);
1415 AppendMuxedCluster(
1416 MuxedStreamInfo(kAudioTrackNum, "23K"),
1417 MuxedStreamInfo(kVideoTrackNum, "0 30K"),
1418 MuxedStreamInfo(kTextTrackNum, "25K 40K"));
1419 CheckExpectedRanges(kSourceId, "{ [23,46) }");
1421 EXPECT_CALL(*this, InitSegmentReceived());
1422 AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
1423 AppendMuxedCluster(
1424 MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1425 MuxedStreamInfo(kVideoTrackNum, "60 90K"),
1426 MuxedStreamInfo(kTextTrackNum, "80K 90K"));
1427 CheckExpectedRanges(kSourceId, "{ [23,92) }");
1429 CheckExpectedBuffers(audio_stream, "23K 46K 69K");
1430 CheckExpectedBuffers(video_stream, "30K 90K");
1431 CheckExpectedBuffers(text_stream, "25K 40K 80K 90K");
1434 // Make sure that the demuxer reports an error if Shutdown()
1435 // is called before all the initialization segments are appended.
1436 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1437 EXPECT_CALL(*this, DemuxerOpened());
1438 demuxer_->Initialize(
1439 &host_, CreateInitDoneCB(
1440 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1442 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1443 EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1445 EXPECT_CALL(*this, InitSegmentReceived());
1446 AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1448 ShutdownDemuxer();
1451 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1452 EXPECT_CALL(*this, DemuxerOpened());
1453 demuxer_->Initialize(
1454 &host_, CreateInitDoneCB(
1455 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1457 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1458 EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1460 EXPECT_CALL(host_, AddTextStream(_, _))
1461 .Times(Exactly(1));
1463 EXPECT_CALL(*this, InitSegmentReceived());
1464 AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1466 ShutdownDemuxer();
1469 // Verifies that all streams waiting for data receive an end of stream
1470 // buffer when Shutdown() is called.
1471 TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1472 DemuxerStream* text_stream = NULL;
1473 EXPECT_CALL(host_, AddTextStream(_, _))
1474 .WillOnce(SaveArg<0>(&text_stream));
1475 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1477 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1478 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1480 bool audio_read_done = false;
1481 bool video_read_done = false;
1482 bool text_read_done = false;
1483 audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1484 video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1485 text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1486 message_loop_.RunUntilIdle();
1488 EXPECT_FALSE(audio_read_done);
1489 EXPECT_FALSE(video_read_done);
1490 EXPECT_FALSE(text_read_done);
1492 ShutdownDemuxer();
1494 EXPECT_TRUE(audio_read_done);
1495 EXPECT_TRUE(video_read_done);
1496 EXPECT_TRUE(text_read_done);
1499 // Test that Seek() completes successfully when the first cluster
1500 // arrives.
1501 TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
1502 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1503 AppendCluster(kDefaultFirstCluster());
1505 InSequence s;
1507 EXPECT_CALL(*this, Checkpoint(1));
1509 Seek(base::TimeDelta::FromMilliseconds(46));
1511 EXPECT_CALL(*this, Checkpoint(2));
1513 Checkpoint(1);
1515 AppendCluster(kDefaultSecondCluster());
1517 message_loop_.RunUntilIdle();
1519 Checkpoint(2);
1522 // Test that parsing errors are handled for clusters appended after init.
1523 TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1524 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1525 AppendCluster(kDefaultFirstCluster());
1527 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1528 AppendGarbage();
1531 // Test the case where a Seek() is requested while the parser
1532 // is in the middle of a cluster. This is to verify that the parser
1533 // does not reset itself on a seek.
1534 TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
1535 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1537 InSequence s;
1539 scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1541 // Split the cluster into two appends at an arbitrary point near the end.
1542 int first_append_size = cluster_a->size() - 11;
1543 int second_append_size = cluster_a->size() - first_append_size;
1545 // Append the first part of the cluster.
1546 AppendData(cluster_a->data(), first_append_size);
1548 ExpectRead(DemuxerStream::AUDIO, 0);
1549 ExpectRead(DemuxerStream::VIDEO, 0);
1550 ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1552 Seek(base::TimeDelta::FromSeconds(5));
1554 // Append the rest of the cluster.
1555 AppendData(cluster_a->data() + first_append_size, second_append_size);
1557 // Append the new cluster and verify that only the blocks
1558 // in the new cluster are returned.
1559 AppendCluster(GenerateCluster(5000, 6));
1560 GenerateExpectedReads(5000, 6);
1563 // Test the case where AppendData() is called before Init().
1564 TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
1565 scoped_ptr<uint8[]> info_tracks;
1566 int info_tracks_size = 0;
1567 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1568 false, false, &info_tracks, &info_tracks_size);
1569 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1570 append_window_start_for_next_append_,
1571 append_window_end_for_next_append_,
1572 &timestamp_offset_map_[kSourceId],
1573 init_segment_received_cb_);
1576 // Make sure Read() callbacks are dispatched with the proper data.
1577 TEST_F(ChunkDemuxerTest, Read) {
1578 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1580 AppendCluster(kDefaultFirstCluster());
1582 bool audio_read_done = false;
1583 bool video_read_done = false;
1584 ReadAudio(base::Bind(&OnReadDone,
1585 base::TimeDelta::FromMilliseconds(0),
1586 &audio_read_done));
1587 ReadVideo(base::Bind(&OnReadDone,
1588 base::TimeDelta::FromMilliseconds(0),
1589 &video_read_done));
1591 EXPECT_TRUE(audio_read_done);
1592 EXPECT_TRUE(video_read_done);
1595 TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
1596 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1597 AppendCluster(kDefaultFirstCluster());
1598 AppendCluster(GenerateCluster(10, 4));
1600 // Make sure that AppendCluster() does not fail with a cluster that
1601 // overlaps with the previously appended cluster.
1602 AppendCluster(GenerateCluster(5, 4));
1604 // Verify that AppendData() can still accept more data.
1605 scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1606 demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1607 append_window_start_for_next_append_,
1608 append_window_end_for_next_append_,
1609 &timestamp_offset_map_[kSourceId],
1610 init_segment_received_cb_);
1613 TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1614 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1615 AppendCluster(kDefaultFirstCluster());
1617 ClusterBuilder cb;
1619 // Test the case where block timecodes are not monotonically
1620 // increasing but stay above the cluster timecode.
1621 cb.SetClusterTimecode(5);
1622 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1623 AddSimpleBlock(&cb, kVideoTrackNum, 10);
1624 AddSimpleBlock(&cb, kAudioTrackNum, 7);
1625 AddSimpleBlock(&cb, kVideoTrackNum, 15);
1627 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1628 AppendCluster(cb.Finish());
1630 // Verify that AppendData() ignores data after the error.
1631 scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1632 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1633 append_window_start_for_next_append_,
1634 append_window_end_for_next_append_,
1635 &timestamp_offset_map_[kSourceId],
1636 init_segment_received_cb_);
1639 TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1640 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1641 AppendCluster(kDefaultFirstCluster());
1643 ClusterBuilder cb;
1645 // Test timecodes going backwards and including values less than the cluster
1646 // timecode.
1647 cb.SetClusterTimecode(5);
1648 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1649 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1650 AddSimpleBlock(&cb, kAudioTrackNum, 3);
1651 AddSimpleBlock(&cb, kVideoTrackNum, 3);
1653 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1654 AppendCluster(cb.Finish());
1656 // Verify that AppendData() ignores data after the error.
1657 scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1658 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1659 append_window_start_for_next_append_,
1660 append_window_end_for_next_append_,
1661 &timestamp_offset_map_[kSourceId],
1662 init_segment_received_cb_);
1666 TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1667 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1668 AppendCluster(kDefaultFirstCluster());
1670 ClusterBuilder cb;
1672 // Test that timestamps must be monotonically increasing on a per-stream
1673 // basis.
1674 cb.SetClusterTimecode(5);
1675 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1676 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1677 AddSimpleBlock(&cb, kAudioTrackNum, 4);
1678 AddSimpleBlock(&cb, kVideoTrackNum, 7);
1680 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1681 AppendCluster(cb.Finish());
1684 // Test the case where a cluster is passed to AppendCluster() before
1685 // INFO & TRACKS data.
1686 TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1687 EXPECT_CALL(*this, DemuxerOpened());
1688 demuxer_->Initialize(
1689 &host_, NewExpectedStatusCB(PIPELINE_ERROR_DECODE), true);
1691 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1693 AppendCluster(GenerateCluster(0, 1));
1696 // Test cases where we get a MarkEndOfStream() call during initialization.
1697 TEST_F(ChunkDemuxerTest, EOSDuringInit) {
1698 EXPECT_CALL(*this, DemuxerOpened());
1699 demuxer_->Initialize(
1700 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1701 MarkEndOfStream(PIPELINE_OK);
1704 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1705 EXPECT_CALL(*this, DemuxerOpened());
1706 demuxer_->Initialize(
1707 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1709 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1711 CheckExpectedRanges("{ }");
1712 MarkEndOfStream(PIPELINE_OK);
1713 ShutdownDemuxer();
1714 CheckExpectedRanges("{ }");
1715 demuxer_->RemoveId(kSourceId);
1716 demuxer_.reset();
1719 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1720 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1722 CheckExpectedRanges("{ }");
1723 MarkEndOfStream(PIPELINE_OK);
1724 CheckExpectedRanges("{ }");
1727 TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1728 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1730 AppendCluster(kDefaultFirstCluster());
1731 CheckExpectedRanges(kDefaultFirstClusterRange);
1733 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1734 MarkEndOfStream(PIPELINE_ERROR_DECODE);
1735 CheckExpectedRanges(kDefaultFirstClusterRange);
1738 TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1739 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1741 AppendCluster(kDefaultFirstCluster());
1742 CheckExpectedRanges(kDefaultFirstClusterRange);
1744 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1745 MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1748 // Helper class to reduce duplicate code when testing end of stream
1749 // Read() behavior.
1750 class EndOfStreamHelper {
1751 public:
1752 explicit EndOfStreamHelper(Demuxer* demuxer)
1753 : demuxer_(demuxer),
1754 audio_read_done_(false),
1755 video_read_done_(false) {
1758 // Request a read on the audio and video streams.
1759 void RequestReads() {
1760 EXPECT_FALSE(audio_read_done_);
1761 EXPECT_FALSE(video_read_done_);
1763 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1764 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1766 audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1767 video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1768 base::MessageLoop::current()->RunUntilIdle();
1771 // Check that the |audio_read_done_| and |video_read_done_| flags
1772 // match |expected|.
1773 void CheckIfReadDonesWereCalled(bool expected) {
1774 base::MessageLoop::current()->RunUntilIdle();
1775 EXPECT_EQ(expected, audio_read_done_);
1776 EXPECT_EQ(expected, video_read_done_);
1779 private:
1780 static void OnEndOfStreamReadDone(
1781 bool* called,
1782 DemuxerStream::Status status,
1783 const scoped_refptr<DecoderBuffer>& buffer) {
1784 EXPECT_EQ(status, DemuxerStream::kOk);
1785 EXPECT_TRUE(buffer->end_of_stream());
1786 *called = true;
1789 Demuxer* demuxer_;
1790 bool audio_read_done_;
1791 bool video_read_done_;
1793 DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
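// Typical usage, as in the tests below: call RequestReads() before the
// streams can satisfy the reads, call MarkEndOfStream(), then
// CheckIfReadDonesWereCalled(true) to verify the pending reads completed
// with end-of-stream buffers.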
1796 // Make sure that all pending reads for which we don't have media data get an
1797 // "end of stream" buffer when MarkEndOfStream() is called.
1798 TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1799 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1801 AppendCluster(GenerateCluster(0, 2));
1803 bool audio_read_done_1 = false;
1804 bool video_read_done_1 = false;
1805 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1806 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1808 ReadAudio(base::Bind(&OnReadDone,
1809 base::TimeDelta::FromMilliseconds(0),
1810 &audio_read_done_1));
1811 ReadVideo(base::Bind(&OnReadDone,
1812 base::TimeDelta::FromMilliseconds(0),
1813 &video_read_done_1));
1814 message_loop_.RunUntilIdle();
1816 EXPECT_TRUE(audio_read_done_1);
1817 EXPECT_TRUE(video_read_done_1);
1819 end_of_stream_helper_1.RequestReads();
1821 EXPECT_CALL(host_, SetDuration(
1822 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1823 MarkEndOfStream(PIPELINE_OK);
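// For reference: the two-block cluster appears to hold one audio block
// ([0,23)) and one video block ([0,33)), so marking end of stream snaps the
// duration to the highest buffered end timestamp, kVideoBlockDuration (33 ms),
// per the SetDuration() expectation above.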
1825 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1827 end_of_stream_helper_2.RequestReads();
1828 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1831 // Make sure that all Read() calls after we get a MarkEndOfStream()
1832 // call return an "end of stream" buffer.
1833 TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1834 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1836 AppendCluster(GenerateCluster(0, 2));
1838 bool audio_read_done_1 = false;
1839 bool video_read_done_1 = false;
1840 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1841 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1842 EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1844 ReadAudio(base::Bind(&OnReadDone,
1845 base::TimeDelta::FromMilliseconds(0),
1846 &audio_read_done_1));
1847 ReadVideo(base::Bind(&OnReadDone,
1848 base::TimeDelta::FromMilliseconds(0),
1849 &video_read_done_1));
1851 end_of_stream_helper_1.RequestReads();
1853 EXPECT_TRUE(audio_read_done_1);
1854 EXPECT_TRUE(video_read_done_1);
1855 end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1857 EXPECT_CALL(host_, SetDuration(
1858 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1859 MarkEndOfStream(PIPELINE_OK);
1861 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1863 // Request a few more reads and make sure we immediately get
1864 // end of stream buffers.
1865 end_of_stream_helper_2.RequestReads();
1866 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1868 end_of_stream_helper_3.RequestReads();
1869 end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1872 TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1873 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1875 AppendCluster(0, 10);
1876 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1877 MarkEndOfStream(PIPELINE_OK);
1879 // Start the first seek.
1880 Seek(base::TimeDelta::FromMilliseconds(20));
1882 // Simulate another seek being requested before the first
1883 // seek has finished prerolling.
1884 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1885 demuxer_->CancelPendingSeek(seek_time2);
1887 // Finish second seek.
1888 Seek(seek_time2);
1890 DemuxerStream::Status status;
1891 base::TimeDelta last_timestamp;
1893 // Make sure audio can reach end of stream.
1894 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1895 ASSERT_EQ(status, DemuxerStream::kOk);
1897 // Make sure video can reach end of stream.
1898 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1899 ASSERT_EQ(status, DemuxerStream::kOk);
1902 // Verify buffered range change behavior for audio/video/text tracks.
1903 TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1904 DemuxerStream* text_stream = NULL;
1906 EXPECT_CALL(host_, AddTextStream(_, _))
1907 .WillOnce(SaveArg<0>(&text_stream));
1908 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1910 AppendMuxedCluster(
1911 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1912 MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
1914 // Check expected ranges and verify that an empty text track does not
1915 // affect the expected ranges.
1916 CheckExpectedRanges(kSourceId, "{ [0,46) }");
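// A sketch of the arithmetic, assuming the 23 ms audio / 33 ms video block
// durations used throughout this file: audio "0K 23K" buffers [0,46) and
// video "0K 33" buffers [0,66); the range reported for the source tracks the
// shorter audio range until end of stream extends it to [0,66) below.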
1918 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1919 MarkEndOfStream(PIPELINE_OK);
1921 // Check expected ranges and verify that an empty text track does not
1922 // affect the expected ranges.
1923 CheckExpectedRanges(kSourceId, "{ [0,66) }");
1925 // Unmark end of stream state and verify that the ranges return to
1926 // their pre-"end of stream" values.
1927 demuxer_->UnmarkEndOfStream();
1928 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1930 // Add text track data and verify that the buffered ranges don't change
1931 // since the intersection of all the tracks doesn't change.
1932 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1933 AppendMuxedCluster(
1934 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1935 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1936 MuxedStreamInfo(kTextTrackNum, "0K 100K"));
1937 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1939 // Mark end of stream and verify that text track data is reflected in
1940 // the new range.
1941 MarkEndOfStream(PIPELINE_OK);
1942 CheckExpectedRanges(kSourceId, "{ [0,200) }");
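// Rough arithmetic: the text cues "0K 100K" (100 ms each) buffer [0,200), and
// marking end of stream extends the last range of every track to that highest
// end timestamp, so the combined range grows from [0,46) to [0,200).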
1945 // Make sure AppendData() will accept elements that span multiple calls.
1946 TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1947 EXPECT_CALL(*this, DemuxerOpened());
1948 demuxer_->Initialize(
1949 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1951 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1953 scoped_ptr<uint8[]> info_tracks;
1954 int info_tracks_size = 0;
1955 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1956 false, false, &info_tracks, &info_tracks_size);
1958 scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1959 scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1961 size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1962 scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1963 uint8* dst = buffer.get();
1964 memcpy(dst, info_tracks.get(), info_tracks_size);
1965 dst += info_tracks_size;
1967 memcpy(dst, cluster_a->data(), cluster_a->size());
1968 dst += cluster_a->size();
1970 memcpy(dst, cluster_b->data(), cluster_b->size());
1971 dst += cluster_b->size();
1973 EXPECT_CALL(*this, InitSegmentReceived());
1974 AppendDataInPieces(buffer.get(), buffer_size);
1976 GenerateExpectedReads(0, 9);
1979 TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1980 struct BufferTimestamps buffer_timestamps[] = {
1981 {0, 0},
1982 {33, 3},
1983 {67, 6},
1984 {100, 9},
1985 {133, 12},
1986 {kSkip, kSkip},
1989 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1990 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1991 // have the correct duration in the init segment. See http://crbug.com/354284.
1992 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1994 ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1995 base::TimeDelta::FromMilliseconds(2744)));
1998 TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1999 struct BufferTimestamps buffer_timestamps[] = {
2000 {0, 0},
2001 {33, 3},
2002 {67, 6},
2003 {100, 9},
2004 {133, 12},
2005 {kSkip, kSkip},
2008 ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
2009 kInfiniteDuration()));
2011 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2012 EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, audio->liveness());
2013 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2014 EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, video->liveness());
2017 TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
2018 struct BufferTimestamps buffer_timestamps[] = {
2019 {kSkip, 0},
2020 {kSkip, 3},
2021 {kSkip, 6},
2022 {kSkip, 9},
2023 {kSkip, 12},
2024 {kSkip, kSkip},
2027 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
2028 // ParseWebMFile() call's expected duration, below, once the file is fixed to
2029 // have the correct duration in the init segment. See http://crbug.com/354284.
2030 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
2032 ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
2033 base::TimeDelta::FromMilliseconds(2744),
2034 HAS_AUDIO));
2037 TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
2038 struct BufferTimestamps buffer_timestamps[] = {
2039 {0, kSkip},
2040 {33, kSkip},
2041 {67, kSkip},
2042 {100, kSkip},
2043 {133, kSkip},
2044 {kSkip, kSkip},
2047 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
2048 // ParseWebMFile() call's expected duration, below, once the file is fixed to
2049 // have the correct duration in the init segment. See http://crbug.com/354284.
2050 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
2052 ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
2053 base::TimeDelta::FromMilliseconds(2703),
2054 HAS_VIDEO));
2057 TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
2058 struct BufferTimestamps buffer_timestamps[] = {
2059 {0, 0},
2060 {33, 3},
2061 {33, 6},
2062 {67, 9},
2063 {100, 12},
2064 {kSkip, kSkip},
2067 ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
2068 base::TimeDelta::FromMilliseconds(2767)));
2071 // Verify that we output buffers before the entire cluster has been parsed.
2072 TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
2073 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2074 AppendEmptyCluster(0);
2076 scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
2078 bool audio_read_done = false;
2079 bool video_read_done = false;
2080 ReadAudio(base::Bind(&OnReadDone,
2081 base::TimeDelta::FromMilliseconds(0),
2082 &audio_read_done));
2083 ReadVideo(base::Bind(&OnReadDone,
2084 base::TimeDelta::FromMilliseconds(0),
2085 &video_read_done));
2087 // Make sure the reads haven't completed yet.
2088 EXPECT_FALSE(audio_read_done);
2089 EXPECT_FALSE(video_read_done);
2091 // Append data one byte at a time until one or both reads complete.
2092 int i = 0;
2093 for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
2094 AppendData(cluster->data() + i, 1);
2095 message_loop_.RunUntilIdle();
2098 EXPECT_TRUE(audio_read_done || video_read_done);
2099 EXPECT_GT(i, 0);
2100 EXPECT_LT(i, cluster->size());
2102 audio_read_done = false;
2103 video_read_done = false;
2104 ReadAudio(base::Bind(&OnReadDone,
2105 base::TimeDelta::FromMilliseconds(23),
2106 &audio_read_done));
2107 ReadVideo(base::Bind(&OnReadDone,
2108 base::TimeDelta::FromMilliseconds(33),
2109 &video_read_done));
2111 // Make sure the reads haven't completed yet.
2112 EXPECT_FALSE(audio_read_done);
2113 EXPECT_FALSE(video_read_done);
2115 // Append the remaining data.
2116 ASSERT_LT(i, cluster->size());
2117 AppendData(cluster->data() + i, cluster->size() - i);
2119 message_loop_.RunUntilIdle();
2121 EXPECT_TRUE(audio_read_done);
2122 EXPECT_TRUE(video_read_done);
2125 TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
2126 EXPECT_CALL(*this, DemuxerOpened());
2127 demuxer_->Initialize(
2128 &host_, CreateInitDoneCB(
2129 kNoTimestamp(), PIPELINE_ERROR_DECODE), true);
2131 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
2133 uint8 tmp = 0;
2134 demuxer_->AppendData(kSourceId, &tmp, 1,
2135 append_window_start_for_next_append_,
2136 append_window_end_for_next_append_,
2137 &timestamp_offset_map_[kSourceId],
2138 init_segment_received_cb_);
2141 TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
2142 EXPECT_CALL(*this, DemuxerOpened());
2143 demuxer_->Initialize(
2144 &host_, CreateInitDoneCB(kNoTimestamp(),
2145 PIPELINE_ERROR_DECODE), true);
2147 std::vector<std::string> codecs(1);
2148 codecs[0] = "vorbis";
2149 ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
2150 ChunkDemuxer::kOk);
2152 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2155 TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
2156 EXPECT_CALL(*this, DemuxerOpened());
2157 demuxer_->Initialize(
2158 &host_, CreateInitDoneCB(kNoTimestamp(),
2159 PIPELINE_ERROR_DECODE), true);
2161 std::vector<std::string> codecs(1);
2162 codecs[0] = "vp8";
2163 ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
2164 ChunkDemuxer::kOk);
2166 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2169 TEST_F(ChunkDemuxerTest, MultipleHeaders) {
2170 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2172 AppendCluster(kDefaultFirstCluster());
2174 // Append another identical initialization segment.
2175 EXPECT_CALL(*this, InitSegmentReceived());
2176 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2178 AppendCluster(kDefaultSecondCluster());
2180 GenerateExpectedReads(0, 9);
2183 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
2184 std::string audio_id = "audio1";
2185 std::string video_id = "video1";
2186 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2188 // Append audio and video data into separate source ids.
2189 AppendCluster(audio_id,
2190 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2191 GenerateAudioStreamExpectedReads(0, 4);
2192 AppendCluster(video_id,
2193 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2194 GenerateVideoStreamExpectedReads(0, 4);
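// For reference: 92 = 4 * kAudioBlockDuration (23 ms) and
// 132 = 4 * kVideoBlockDuration (33 ms), so each cluster carries the four
// blocks that the expected-read helpers above consume.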
2197 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2198 // TODO(matthewjheaney): Here and elsewhere, we need more tests
2199 // for inband text tracks (http://crbug/321455).
2201 std::string audio_id = "audio1";
2202 std::string video_id = "video1";
2204 EXPECT_CALL(host_, AddTextStream(_, _))
2205 .Times(Exactly(2));
2206 ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2208 // Append audio and video data into separate source ids.
2209 AppendCluster(audio_id,
2210 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2211 GenerateAudioStreamExpectedReads(0, 4);
2212 AppendCluster(video_id,
2213 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2214 GenerateVideoStreamExpectedReads(0, 4);
2217 TEST_F(ChunkDemuxerTest, AddIdFailures) {
2218 EXPECT_CALL(*this, DemuxerOpened());
2219 demuxer_->Initialize(
2220 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2222 std::string audio_id = "audio1";
2223 std::string video_id = "video1";
2225 ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2227 // Adding an id with audio/video should fail because we already added audio.
2228 ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2230 EXPECT_CALL(*this, InitSegmentReceived());
2231 AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2233 // Adding an id after append should fail.
2234 ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2237 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2238 TEST_F(ChunkDemuxerTest, RemoveId) {
2239 std::string audio_id = "audio1";
2240 std::string video_id = "video1";
2241 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2243 // Append audio and video data into separate source ids.
2244 AppendCluster(audio_id,
2245 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2246 AppendCluster(video_id,
2247 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2249 // Read() from audio should return normal buffers.
2250 GenerateAudioStreamExpectedReads(0, 4);
2252 // Remove the audio id.
2253 demuxer_->RemoveId(audio_id);
2255 // Read() from audio should return "end of stream" buffers.
2256 bool audio_read_done = false;
2257 ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2258 message_loop_.RunUntilIdle();
2259 EXPECT_TRUE(audio_read_done);
2261 // Read() from video should still return normal buffers.
2262 GenerateVideoStreamExpectedReads(0, 4);
2265 // Test that removing an ID immediately after adding it does not interfere with
2266 // quota for new IDs in the future.
2267 TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
2268 std::string audio_id_1 = "audio1";
2269 ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
2270 demuxer_->RemoveId(audio_id_1);
2272 std::string audio_id_2 = "audio2";
2273 ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
2276 TEST_F(ChunkDemuxerTest, SeekCanceled) {
2277 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2279 // Append cluster at the beginning of the stream.
2280 AppendCluster(GenerateCluster(0, 4));
2282 // Seek to an unbuffered region.
2283 Seek(base::TimeDelta::FromSeconds(50));
2285 // Attempt to read in the unbuffered area; the reads should not be fulfilled.
2286 bool audio_read_done = false;
2287 bool video_read_done = false;
2288 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2289 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2290 EXPECT_FALSE(audio_read_done);
2291 EXPECT_FALSE(video_read_done);
2293 // Now cancel the pending seek, which should flush the reads with empty
2294 // buffers.
2295 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2296 demuxer_->CancelPendingSeek(seek_time);
2297 message_loop_.RunUntilIdle();
2298 EXPECT_TRUE(audio_read_done);
2299 EXPECT_TRUE(video_read_done);
2301 // A seek back to the buffered region should succeed.
2302 Seek(seek_time);
2303 GenerateExpectedReads(0, 4);
2306 TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2307 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2309 // Append cluster at the beginning of the stream.
2310 AppendCluster(GenerateCluster(0, 4));
2312 // Start waiting for a seek.
2313 base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2314 base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2315 demuxer_->StartWaitingForSeek(seek_time1);
2317 // Now cancel the upcoming seek to an unbuffered region.
2318 demuxer_->CancelPendingSeek(seek_time2);
2319 demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2321 // Read requests should be fulfilled with empty buffers.
2322 bool audio_read_done = false;
2323 bool video_read_done = false;
2324 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2325 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2326 EXPECT_TRUE(audio_read_done);
2327 EXPECT_TRUE(video_read_done);
2329 // A seek back to the buffered region should succeed.
2330 Seek(seek_time2);
2331 GenerateExpectedReads(0, 4);
2334 // Test that Seek() successfully seeks to all source IDs.
2335 TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2336 std::string audio_id = "audio1";
2337 std::string video_id = "video1";
2338 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2340 AppendCluster(
2341 audio_id,
2342 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2343 AppendCluster(
2344 video_id,
2345 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2347 // Read() should return buffers at 0.
2348 bool audio_read_done = false;
2349 bool video_read_done = false;
2350 ReadAudio(base::Bind(&OnReadDone,
2351 base::TimeDelta::FromMilliseconds(0),
2352 &audio_read_done));
2353 ReadVideo(base::Bind(&OnReadDone,
2354 base::TimeDelta::FromMilliseconds(0),
2355 &video_read_done));
2356 EXPECT_TRUE(audio_read_done);
2357 EXPECT_TRUE(video_read_done);
2359 // Seek to 3 (an unbuffered region).
2360 Seek(base::TimeDelta::FromSeconds(3));
2362 audio_read_done = false;
2363 video_read_done = false;
2364 ReadAudio(base::Bind(&OnReadDone,
2365 base::TimeDelta::FromSeconds(3),
2366 &audio_read_done));
2367 ReadVideo(base::Bind(&OnReadDone,
2368 base::TimeDelta::FromSeconds(3),
2369 &video_read_done));
2370 // Read()s should not return until after data is appended at the Seek point.
2371 EXPECT_FALSE(audio_read_done);
2372 EXPECT_FALSE(video_read_done);
2374 AppendCluster(audio_id,
2375 GenerateSingleStreamCluster(
2376 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2377 AppendCluster(video_id,
2378 GenerateSingleStreamCluster(
2379 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2381 message_loop_.RunUntilIdle();
2383 // Read() should return buffers at 3.
2384 EXPECT_TRUE(audio_read_done);
2385 EXPECT_TRUE(video_read_done);
2388 // Test that Seek() completes successfully when EndOfStream
2389 // is called before data is available for that seek point.
2390 // This scenario can occur when seeking past the end of stream
2391 // of either audio or video (or both).
2392 TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2393 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2395 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2396 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2398 // Seeking past the end of video.
2399 // Note: audio data is available for that seek point.
2400 bool seek_cb_was_called = false;
2401 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2402 demuxer_->StartWaitingForSeek(seek_time);
2403 demuxer_->Seek(seek_time,
2404 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2405 message_loop_.RunUntilIdle();
2407 EXPECT_FALSE(seek_cb_was_called);
2409 EXPECT_CALL(host_, SetDuration(
2410 base::TimeDelta::FromMilliseconds(120)));
2411 MarkEndOfStream(PIPELINE_OK);
2412 message_loop_.RunUntilIdle();
2414 EXPECT_TRUE(seek_cb_was_called);
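// The seek to 110 ms stalls because video is only buffered to 100 ms;
// marking end of stream sets the duration to the highest buffered end
// (120 ms, from audio) and lets the seek complete without more video data.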
2416 ShutdownDemuxer();
2419 // Test that EndOfStream is ignored if it arrives during a pending seek
2420 // whose seek time is before some existing ranges.
2421 TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2422 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2424 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2425 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2426 AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2427 AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2429 bool seek_cb_was_called = false;
2430 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2431 demuxer_->StartWaitingForSeek(seek_time);
2432 demuxer_->Seek(seek_time,
2433 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2434 message_loop_.RunUntilIdle();
2436 EXPECT_FALSE(seek_cb_was_called);
2438 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2439 MarkEndOfStream(PIPELINE_OK);
2440 message_loop_.RunUntilIdle();
2442 EXPECT_FALSE(seek_cb_was_called);
2444 demuxer_->UnmarkEndOfStream();
2446 AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2447 AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2449 message_loop_.RunUntilIdle();
2451 EXPECT_TRUE(seek_cb_was_called);
2453 ShutdownDemuxer();
2456 // Test ranges in an audio-only stream.
2457 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2458 EXPECT_CALL(*this, DemuxerOpened());
2459 demuxer_->Initialize(
2460 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2462 ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2463 EXPECT_CALL(*this, InitSegmentReceived());
2464 AppendInitSegment(HAS_AUDIO);
2466 // Test a simple cluster.
2467 AppendCluster(
2468 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2470 CheckExpectedRanges("{ [0,92) }");
2472 // Append a disjoint cluster to check for two separate ranges.
2473 AppendCluster(GenerateSingleStreamCluster(
2474 150, 219, kAudioTrackNum, kAudioBlockDuration));
2476 CheckExpectedRanges("{ [0,92) [150,219) }");
2479 // Test ranges in a video-only stream.
2480 TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2481 EXPECT_CALL(*this, DemuxerOpened());
2482 demuxer_->Initialize(
2483 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2485 ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2486 EXPECT_CALL(*this, InitSegmentReceived());
2487 AppendInitSegment(HAS_VIDEO);
2489 // Test a simple cluster.
2490 AppendCluster(
2491 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2493 CheckExpectedRanges("{ [0,132) }");
2495 // Append a disjoint cluster to check for two separate ranges.
2496 AppendCluster(GenerateSingleStreamCluster(
2497 200, 299, kVideoTrackNum, kVideoBlockDuration));
2499 CheckExpectedRanges("{ [0,132) [200,299) }");
2502 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2503 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2505 // Audio: 0 -> 23
2506 // Video: 0 -> 33
2507 // Buffered Range: 0 -> 23
2508 // Audio block duration is smaller than video block duration,
2509 // so the buffered ranges should correspond to the audio blocks.
2510 AppendCluster(GenerateSingleStreamCluster(
2511 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2512 AppendCluster(GenerateSingleStreamCluster(
2513 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2515 CheckExpectedRanges("{ [0,23) }");
2517 // Audio: 300 -> 400
2518 // Video: 320 -> 420
2519 // Buffered Range: 320 -> 400 (end overlap)
2520 AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2521 AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2523 CheckExpectedRanges("{ [0,23) [320,400) }");
2525 // Audio: 520 -> 590
2526 // Video: 500 -> 570
2527 // Buffered Range: 520 -> 570 (front overlap)
2528 AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2529 AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2531 CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2533 // Audio: 720 -> 750
2534 // Video: 700 -> 770
2535 // Buffered Range: 720 -> 750 (complete overlap, audio)
2536 AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2537 AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2539 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2541 // Audio: 900 -> 970
2542 // Video: 920 -> 950
2543 // Buffered Range: 920 -> 950 (complete overlap, video)
2544 AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2545 AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2547 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2549 // Appending within buffered range should not affect buffered ranges.
2550 AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2551 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2553 // Appending to single stream outside buffered ranges should not affect
2554 // buffered ranges.
2555 AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2556 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2559 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2560 EXPECT_CALL(host_, AddTextStream(_, _));
2561 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2563 // Append audio & video data
2564 AppendMuxedCluster(
2565 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2566 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2568 // Verify that a text track with no cues does not result in an empty buffered
2569 // range.
2570 CheckExpectedRanges("{ [0,46) }");
2572 // Add some text cues.
2573 AppendMuxedCluster(
2574 MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
2575 MuxedStreamInfo(kVideoTrackNum, "100K 133"),
2576 MuxedStreamInfo(kTextTrackNum, "100K 200K"));
2578 // Verify that the text cues are not reflected in the buffered ranges.
2579 CheckExpectedRanges("{ [0,46) [100,146) }");
2581 // Remove the buffered ranges.
2582 demuxer_->Remove(kSourceId, base::TimeDelta(),
2583 base::TimeDelta::FromMilliseconds(250));
2584 CheckExpectedRanges("{ }");
2587 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2588 // overhanging tails at the end of the ranges, since these are likely due to
2589 // block duration differences.
2590 TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2591 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2593 AppendMuxedCluster(
2594 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2595 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2597 CheckExpectedRanges("{ [0,46) }");
2599 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2600 MarkEndOfStream(PIPELINE_OK);
2602 // Verify that the range extends to the end of the video data.
2603 CheckExpectedRanges("{ [0,66) }");
2605 // Verify that the range reverts to the intersection when end of stream
2606 // has been cancelled.
2607 demuxer_->UnmarkEndOfStream();
2608 CheckExpectedRanges("{ [0,46) }");
2610 // Append and remove data so that the 2 streams' end ranges do not overlap.
2612 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2613 AppendMuxedCluster(
2614 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2615 MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
2617 // At this point, the per-stream ranges are as follows:
2618 // Audio: [0,46) [200,246)
2619 // Video: [0,66) [200,398)
2620 CheckExpectedRanges("{ [0,46) [200,246) }");
2622 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2623 base::TimeDelta::FromMilliseconds(300));
2625 // At this point, the per-stream ranges are as follows:
2626 // Audio: [0,46)
2627 // Video: [0,66) [332,398)
2628 CheckExpectedRanges("{ [0,46) }");
2630 AppendMuxedCluster(
2631 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2632 MuxedStreamInfo(kVideoTrackNum, "200K 233"));
2634 // At this point, the per-stream ranges are as follows:
2635 // Audio: [0,46) [200,246)
2636 // Video: [0,66) [200,266) [332,398)
2637 // NOTE: The last ranges on each stream do not overlap in time.
2638 CheckExpectedRanges("{ [0,46) [200,246) }");
2640 MarkEndOfStream(PIPELINE_OK);
2642 // NOTE: The last range on each stream gets extended to the highest
2643 // end timestamp according to the spec. The last audio range gets extended
2644 // from [200,246) to [200,398) which is why the intersection results in the
2645 // middle range getting larger AND the new range appearing.
2646 CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
2649 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2650 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2652 // Create a cluster where the video timecode begins 25ms after the audio.
2653 AppendCluster(GenerateCluster(0, 25, 8));
2655 Seek(base::TimeDelta::FromSeconds(0));
2656 GenerateExpectedReads(0, 25, 8);
2658 // Seek to 5 seconds.
2659 Seek(base::TimeDelta::FromSeconds(5));
2661 // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2662 // after the video.
2663 AppendCluster(GenerateCluster(5025, 5000, 8));
2664 GenerateExpectedReads(5025, 5000, 8);
2667 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2668 std::string audio_id = "audio1";
2669 std::string video_id = "video1";
2670 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2672 // Generate two streams where the video stream starts 5ms after the audio
2673 // stream and append them.
2674 AppendCluster(audio_id, GenerateSingleStreamCluster(
2675 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2676 AppendCluster(video_id, GenerateSingleStreamCluster(
2677 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2679 // Both streams should be able to fulfill a seek to 25.
2680 Seek(base::TimeDelta::FromMilliseconds(25));
2681 GenerateAudioStreamExpectedReads(25, 4);
2682 GenerateVideoStreamExpectedReads(30, 4);
2685 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2686 std::string audio_id = "audio1";
2687 std::string video_id = "video1";
2688 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2690 // Generate two streams where the video stream starts 10s after the audio
2691 // stream and append them.
2692 AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2693 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2694 AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2695 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2697 // Should not be able to fulfill a seek to 0.
2698 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2699 demuxer_->StartWaitingForSeek(seek_time);
2700 demuxer_->Seek(seek_time,
2701 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2702 ExpectRead(DemuxerStream::AUDIO, 0);
2703 ExpectEndOfStream(DemuxerStream::VIDEO);
2706 TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2707 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2709 // Generate and append an empty cluster beginning at 0.
2710 AppendEmptyCluster(0);
2712 // Sanity check that data can be appended after this cluster correctly.
2713 AppendCluster(GenerateCluster(0, 2));
2714 ExpectRead(DemuxerStream::AUDIO, 0);
2715 ExpectRead(DemuxerStream::VIDEO, 0);
2718 TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
2719 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2721 #if defined(USE_PROPRIETARY_CODECS)
2722 expected = ChunkDemuxer::kOk;
2723 #endif
2725 std::vector<std::string> codecs;
2726 codecs.push_back("avc1.4D4041");
2728 EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2731 // Test codec IDs that are not compliant with RFC6381, but have been
2732 // seen in the wild.
2733 TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2734 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2736 #if defined(USE_PROPRIETARY_CODECS)
2737 expected = ChunkDemuxer::kOk;
2738 #endif
2739 const char* codec_ids[] = {
2740 // GPAC places leading zeros on the audio object type.
2741 "mp4a.40.02",
2742 "mp4a.40.05"
2745 for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2746 std::vector<std::string> codecs;
2747 codecs.push_back(codec_ids[i]);
2749 ChunkDemuxer::Status result =
2750 demuxer_->AddId("source_id", "audio/mp4", codecs);
2752 EXPECT_EQ(result, expected)
2753 << "Fail to add codec_id '" << codec_ids[i] << "'";
2755 if (result == ChunkDemuxer::kOk)
2756 demuxer_->RemoveId("source_id");
2760 TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2761 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2763 EXPECT_CALL(host_, SetDuration(_))
2764 .Times(AnyNumber());
2766 base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2767 base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2769 AppendCluster(kDefaultFirstCluster());
2770 AppendCluster(kDefaultSecondCluster());
2771 MarkEndOfStream(PIPELINE_OK);
2773 DemuxerStream::Status status;
2774 base::TimeDelta last_timestamp;
2776 // Verify that we can read audio & video to the end w/o problems.
2777 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2778 EXPECT_EQ(DemuxerStream::kOk, status);
2779 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2781 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2782 EXPECT_EQ(DemuxerStream::kOk, status);
2783 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2785 // Seek back to 0 and verify that we can read to the end again.
2786 Seek(base::TimeDelta::FromMilliseconds(0));
2788 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2789 EXPECT_EQ(DemuxerStream::kOk, status);
2790 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2792 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2793 EXPECT_EQ(DemuxerStream::kOk, status);
2794 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2797 TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2798 EXPECT_CALL(*this, DemuxerOpened());
2799 demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2800 ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2801 ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2803 CheckExpectedRanges("audio", "{ }");
2804 CheckExpectedRanges("video", "{ }");
2807 // Test that Seek() completes successfully when the first cluster
2808 // arrives.
2809 TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2810 InSequence s;
2812 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2814 AppendCluster(kDefaultFirstCluster());
2816 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2817 demuxer_->StartWaitingForSeek(seek_time);
2819 AppendCluster(kDefaultSecondCluster());
2820 EXPECT_CALL(host_, SetDuration(
2821 base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2822 MarkEndOfStream(PIPELINE_OK);
2824 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2826 GenerateExpectedReads(0, 4);
2827 GenerateExpectedReads(46, 66, 5);
2829 EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2830 end_of_stream_helper.RequestReads();
2831 end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2834 TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2835 InSequence s;
2837 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2839 DemuxerStream::Status status;
2840 base::TimeDelta last_timestamp;
2842 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2844 // Fetch initial video config and verify it matches what we expect.
2845 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2846 ASSERT_TRUE(video_config_1.IsValidConfig());
2847 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2848 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2850 ExpectRead(DemuxerStream::VIDEO, 0);
2852 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2854 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2855 EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2857 // Fetch the new decoder config.
2858 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2859 ASSERT_TRUE(video_config_2.IsValidConfig());
2860 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2861 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2863 ExpectRead(DemuxerStream::VIDEO, 527);
2865 // Read until the next config change.
2866 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2867 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2868 EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2870 // Get the new config and verify that it matches the first one.
2871 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2873 ExpectRead(DemuxerStream::VIDEO, 801);
2875 // Read until the end of the stream just to make sure there aren't any other
2876 // config changes.
2877 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2878 ASSERT_EQ(status, DemuxerStream::kOk);
2881 TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2882 InSequence s;
2884 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2886 DemuxerStream::Status status;
2887 base::TimeDelta last_timestamp;
2889 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2891 // Fetch initial audio config and verify it matches what we expect.
2892 const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2893 ASSERT_TRUE(audio_config_1.IsValidConfig());
2894 EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2895 EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2897 ExpectRead(DemuxerStream::AUDIO, 0);
2899 // The first config change seen is from a splice frame representing an overlap
2900 // of buffers from config 1 by buffers from config 2.
2901 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2902 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2903 EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2905 // Fetch the new decoder config.
2906 const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2907 ASSERT_TRUE(audio_config_2.IsValidConfig());
2908 EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2909 EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2911 // The next config change is from a splice frame representing an overlap of
2912 // buffers from config 2 by buffers from config 1.
2913 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2914 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2915 EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2916 ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2918 // Read until the end of the stream just to make sure there aren't any other
2919 // config changes.
2920 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2921 ASSERT_EQ(status, DemuxerStream::kOk);
2922 EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2925 TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2926 InSequence s;
2928 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2930 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2932 // Fetch initial video config and verify it matches what we expect.
2933 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2934 ASSERT_TRUE(video_config_1.IsValidConfig());
2935 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2936 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2938 ExpectRead(DemuxerStream::VIDEO, 0);
2940 // Seek to a location with a different config.
2941 Seek(base::TimeDelta::FromMilliseconds(527));
2943 // Verify that the config change is signalled.
2944 ExpectConfigChanged(DemuxerStream::VIDEO);
2946 // Fetch the new decoder config and verify it is what we expect.
2947 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2948 ASSERT_TRUE(video_config_2.IsValidConfig());
2949 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2950 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2952 // Verify that Read() will return a buffer now.
2953 ExpectRead(DemuxerStream::VIDEO, 527);
2955 // Seek back to the beginning and verify we get another config change.
2956 Seek(base::TimeDelta::FromMilliseconds(0));
2957 ExpectConfigChanged(DemuxerStream::VIDEO);
2958 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2959 ExpectRead(DemuxerStream::VIDEO, 0);
2961 // Seek to a location that requires a config change and then
2962 // seek to a new location that has the same configuration as
2963 // the start of the file without a Read() in the middle.
2964 Seek(base::TimeDelta::FromMilliseconds(527));
2965 Seek(base::TimeDelta::FromMilliseconds(801));
2967 // Verify that no config change is signalled.
2968 ExpectRead(DemuxerStream::VIDEO, 801);
2969 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2972 TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2973 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2975 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2976 AppendCluster(GenerateCluster(0, 2));
2978 Seek(base::TimeDelta::FromMilliseconds(30000));
2980 GenerateExpectedReads(30000, 2);
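// Rough sketch of the offset math: the cluster's blocks are authored at 0 ms,
// so with a +30 s timestamp offset they are buffered at 30000 ms, which is
// why the seek and the two expected reads above target 30000 ms.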
2983 TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2984 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2986 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2987 AppendCluster(GenerateCluster(1000, 2));
2989 GenerateExpectedReads(0, 2);
2992 TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2993 std::string audio_id = "audio1";
2994 std::string video_id = "video1";
2995 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2997 ASSERT_TRUE(SetTimestampOffset(
2998 audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2999 ASSERT_TRUE(SetTimestampOffset(
3000 video_id, base::TimeDelta::FromMilliseconds(-2500)));
3001 AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
3002 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
3003 AppendCluster(video_id, GenerateSingleStreamCluster(2500,
3004 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
3005 GenerateAudioStreamExpectedReads(0, 4);
3006 GenerateVideoStreamExpectedReads(0, 4);
3008 Seek(base::TimeDelta::FromMilliseconds(27300));
3010 ASSERT_TRUE(SetTimestampOffset(
3011 audio_id, base::TimeDelta::FromMilliseconds(27300)));
3012 ASSERT_TRUE(SetTimestampOffset(
3013 video_id, base::TimeDelta::FromMilliseconds(27300)));
3014 AppendCluster(audio_id, GenerateSingleStreamCluster(
3015 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
3016 AppendCluster(video_id, GenerateSingleStreamCluster(
3017 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
3018 GenerateVideoStreamExpectedReads(27300, 4);
3019 GenerateAudioStreamExpectedReads(27300, 4);
3022 TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
3023 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3025 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
3026 // Append only part of the cluster data.
3027 AppendData(cluster->data(), cluster->size() - 13);
3029 // Confirm we're in the middle of parsing a media segment.
3030 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3032 demuxer_->Abort(kSourceId,
3033 append_window_start_for_next_append_,
3034 append_window_end_for_next_append_,
3035 &timestamp_offset_map_[kSourceId]);
3037 // After Abort(), parsing should no longer be in the middle of a media
3038 // segment.
3039 ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
3042 #if defined(USE_PROPRIETARY_CODECS)
3043 #if defined(ENABLE_MPEG2TS_STREAM_PARSER)
3044 TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
3045 EXPECT_CALL(*this, DemuxerOpened());
3046 demuxer_->Initialize(
3047 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3048 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3050 // For info:
3051 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3052 // Video: first PES:
3053 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
3054 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
3055 // Audio: first PES:
3056 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
3057 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
3058 // Video: last PES:
3059 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
3060 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
3061 // Audio: last PES:
3062 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
3064 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3065 EXPECT_CALL(*this, InitSegmentReceived());
3066 AppendData(kSourceId, buffer->data(), buffer->data_size());
3068 // Confirm we're in the middle of parsing a media segment.
3069 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3071 // Calling Abort() on the Mpeg2 TS parser triggers the emission of the last
3072 // video buffer that is still pending in the stream parser.
3073 Ranges<base::TimeDelta> range_before_abort =
3074 demuxer_->GetBufferedRanges(kSourceId);
3075 demuxer_->Abort(kSourceId,
3076 append_window_start_for_next_append_,
3077 append_window_end_for_next_append_,
3078 &timestamp_offset_map_[kSourceId]);
3079 Ranges<base::TimeDelta> range_after_abort =
3080 demuxer_->GetBufferedRanges(kSourceId);
3082 ASSERT_EQ(range_before_abort.size(), 1u);
3083 ASSERT_EQ(range_after_abort.size(), 1u);
3084 EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
3085 EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
3088 TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
3089 EXPECT_CALL(*this, DemuxerOpened());
3090 demuxer_->Initialize(
3091 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3092 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3094 // For info:
3095 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3096 // Video: first PES:
3097 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
3098 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
3099 // Audio: first PES:
3100 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
3101 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
3102 // Video: last PES:
3103 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
3104 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
3105 // Audio: last PES:
3106 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
3108 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3109 EXPECT_CALL(*this, InitSegmentReceived());
3110 AppendData(kSourceId, buffer->data(), buffer->data_size());
3112 // Confirm we're in the middle of parsing a media segment.
3113 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3115 // Seek to a time corresponding to buffers that will be emitted during the
3116 // abort.
3117 Seek(base::TimeDelta::FromMilliseconds(4110));
3119 // Calling Abort() on the Mpeg2 TS parser triggers the emission of the last
3120 // video buffer that is still pending in the stream parser.
3121 demuxer_->Abort(kSourceId,
3122 append_window_start_for_next_append_,
3123 append_window_end_for_next_append_,
3124 &timestamp_offset_map_[kSourceId]);
3127 #endif
3128 #endif
3130 TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
3131 const uint8 kBuffer[] = {
3132 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
3133 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
3135 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 3 due to:)
3136 0xE7, 0x81, 0x02, // Cluster TIMECODE (value = 2)
3137 /* e.g. put some blocks here... */
3138 0x1A, 0x45, 0xDF, 0xA3, 0x8A, // EBMLHEADER (size = 10, not fully appended)
3141 // This array indicates expected return value of IsParsingMediaSegment()
3142 // following each incrementally appended byte in |kBuffer|.
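// How to read the two arrays together: the value becomes true once a CLUSTER
// ID and size have been fully appended. The first cluster has a known size
// (3), so the value drops back to false as soon as its 3 payload bytes
// arrive. The unknown-size cluster stays "in progress" until the ID and size
// of the next top-level element (the EBMLHEADER above) have been appended,
// which is what terminates it.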
3143 const bool kExpectedReturnValues[] = {
3144 false, false, false, false, true,
3145 true, true, false,
3147 false, false, false, false, true,
3148 true, true, true,
3150 true, true, true, true, false,
3153 static_assert(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
3154 "test arrays out of sync");
3155 static_assert(arraysize(kBuffer) == sizeof(kBuffer),
3156 "there should be one byte per index");
3158 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3160 for (size_t i = 0; i < sizeof(kBuffer); i++) {
3161 DVLOG(3) << "Appending and testing index " << i;
3162 AppendData(kBuffer + i, 1);
3163 bool expected_return_value = kExpectedReturnValues[i];
3164 EXPECT_EQ(expected_return_value,
3165 demuxer_->IsParsingMediaSegment(kSourceId));
3169 TEST_F(ChunkDemuxerTest, DurationChange) {
3170 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3171 const int kStreamDuration = kDefaultDuration().InMilliseconds();
3173 // Add data leading up to the currently set duration.
3174 AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
3175 kStreamDuration - kVideoBlockDuration,
3176 2));
3178 CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
3180 // Add data beginning at the currently set duration and expect a new duration
3181 // to be signaled. Note that the last video block will have a higher end
3182 // timestamp than the last audio block.
3183 const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
3184 EXPECT_CALL(host_, SetDuration(
3185 base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
3186 AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
3188 CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
3190 // Add more data to the end of each media type. Note that the last audio block
3191 // will have a higher end timestamp than the last video block.
3192 const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
3193 EXPECT_CALL(host_, SetDuration(
3194 base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
3195 AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
3196 kStreamDuration + kVideoBlockDuration,
3197 3));
3199 // See that the range has increased appropriately (but not to the full
3200 // duration of 201293, since there is not enough video appended for that).
3201 CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
3204 TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
3205 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3206 ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
3207 EXPECT_CALL(host_, SetDuration(
3208 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
3209 kVideoBlockDuration * 2)));
3210 AppendCluster(GenerateCluster(0, 4));
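// For reference: GenerateCluster(0, 4) appears to produce two audio and two
// video blocks, so with the timestamp offset equal to the old duration the
// appended data ends at kDefaultDuration() + 2 * kVideoBlockDuration (66 ms),
// matching the SetDuration() expectation above.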
3213 TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
3214 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3216 AppendCluster(kDefaultFirstCluster());
3218 EXPECT_CALL(host_, SetDuration(
3219 base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
3220 MarkEndOfStream(PIPELINE_OK);
3224 TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
3225 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3226 AppendData(NULL, 0);
3229 TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
3230 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3232 EXPECT_CALL(host_, SetDuration(_))
3233 .Times(AnyNumber());
3235 AppendCluster(kDefaultFirstCluster());
3236 MarkEndOfStream(PIPELINE_OK);
3238 demuxer_->UnmarkEndOfStream();
3240 AppendCluster(kDefaultSecondCluster());
3241 MarkEndOfStream(PIPELINE_OK);
3244 // Test receiving a Shutdown() call before we get an Initialize()
3245 // call. This can happen if the video element gets destroyed before
3246 // the pipeline has a chance to initialize the demuxer.
3247 TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3248 demuxer_->Shutdown();
3249 demuxer_->Initialize(
3250 &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3251 message_loop_.RunUntilIdle();
// Verifies that signaling end of stream while stalled at a gap
// boundary does not trigger end of stream buffers to be returned.
TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(0, 10);
  AppendCluster(300, 10);
  CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");

  GenerateExpectedReads(0, 10);

  bool audio_read_done = false;
  bool video_read_done = false;
  ReadAudio(base::Bind(&OnReadDone,
                       base::TimeDelta::FromMilliseconds(138),
                       &audio_read_done));
  ReadVideo(base::Bind(&OnReadDone,
                       base::TimeDelta::FromMilliseconds(138),
                       &video_read_done));

  // Verify that the reads didn't complete.
  EXPECT_FALSE(audio_read_done);
  EXPECT_FALSE(video_read_done);

  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
  MarkEndOfStream(PIPELINE_OK);

  // Verify that the reads still haven't completed.
  EXPECT_FALSE(audio_read_done);
  EXPECT_FALSE(video_read_done);

  demuxer_->UnmarkEndOfStream();

  AppendCluster(138, 22);

  message_loop_.RunUntilIdle();

  CheckExpectedRanges(kSourceId, "{ [0,435) }");

  // Verify that the reads have completed.
  EXPECT_TRUE(audio_read_done);
  EXPECT_TRUE(video_read_done);

  // Read the rest of the buffers.
  GenerateExpectedReads(161, 171, 20);

  // Verify that reads block because the append cleared the end of stream
  // state.
  audio_read_done = false;
  video_read_done = false;
  ReadAudio(base::Bind(&OnReadDone_EOSExpected,
                       &audio_read_done));
  ReadVideo(base::Bind(&OnReadDone_EOSExpected,
                       &video_read_done));

  // Verify that the reads don't complete.
  EXPECT_FALSE(audio_read_done);
  EXPECT_FALSE(video_read_done);

  EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
  MarkEndOfStream(PIPELINE_OK);

  EXPECT_TRUE(audio_read_done);
  EXPECT_TRUE(video_read_done);
}
TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Cancel preroll.
  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
  demuxer_->CancelPendingSeek(seek_time);

  // Initiate the seek to the new location.
  Seek(seek_time);

  // Append data to satisfy the seek.
  AppendCluster(seek_time.InMilliseconds(), 10);
}
TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Set different memory limits for audio and video.
  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
  demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize + 1);

  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1000);

  // Append data at the start that can be garbage collected:
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 5);

  // We should be right at the buffer limit, so this eviction request should
  // succeed.
  EXPECT_TRUE(demuxer_->EvictCodedFrames(
      kSourceId, base::TimeDelta::FromMilliseconds(0), 0));
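
  // The 10 audio blocks span 10 * kAudioBlockDuration (23ms) = 230ms, and the
  // 5 video blocks span 5 * kVideoBlockDuration (33ms) = 165ms.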
  CheckExpectedRanges(DemuxerStream::AUDIO, "{ [0,230) }");
  CheckExpectedRanges(DemuxerStream::VIDEO, "{ [0,165) }");

  // Seek so we can garbage collect the data appended above.
  Seek(seek_time);

  // Append data at seek_time.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time.InMilliseconds(), 10);
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            seek_time.InMilliseconds(), 5);

  // Eviction should delete the first append and leave us exactly at the
  // buffer limit.
  EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 0));

  // Verify that the old data, and nothing more, has been garbage collected.
  CheckExpectedRanges(DemuxerStream::AUDIO, "{ [1000,1230) }");
  CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
}
TEST_F(ChunkDemuxerTest, GCDuringSeek) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);

  base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);

  // Initiate a seek to |seek_time1|.
  Seek(seek_time1);

  // Append data to satisfy the first seek request.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time1.InMilliseconds(), 5);
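  // Five blocks of kAudioBlockDuration (23ms) each yield a 115ms range.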
  CheckExpectedRanges(kSourceId, "{ [1000,1115) }");

  // We are under the memory limit, so Evict should be a no-op.
  EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time1, 0));
  CheckExpectedRanges(kSourceId, "{ [1000,1115) }");

  // Signal that the second seek is starting.
  demuxer_->StartWaitingForSeek(seek_time2);

  // Append data to satisfy the second seek.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time2.InMilliseconds(), 5);
  CheckExpectedRanges(kSourceId, "{ [500,615) [1000,1115) }");

  // We are now over our memory usage limit. We have just seeked to
  // |seek_time2|, so the data around the 500ms position should be preserved,
  // while the previous append at 1000ms should be removed.
  EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
  CheckExpectedRanges(kSourceId, "{ [500,615) }");

  // Complete the seek.
  demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));

  // Append more data and make sure that we preserve both the buffered range
  // around |seek_time2|, because that's the current playback position,
  // and the newly appended range, since this is the most recent append.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
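  // With five blocks at 500ms and five more at 700ms we hold ten blocks,
  // double the five-block limit, but neither range can be dropped, so the
  // eviction below reports failure.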
  EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
  CheckExpectedRanges(kSourceId, "{ [500,615) [700,815) }");
}
TEST_F(ChunkDemuxerTest, GCKeepPlayhead) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);

  // Append data at the start that can be garbage collected:
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
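  // Ten blocks of kAudioBlockDuration (23ms) each produce a [0,230) range,
  // double the five-block memory limit set above.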
  CheckExpectedRanges(kSourceId, "{ [0,230) }");

  // We expect garbage collection to fail, as we don't want to spontaneously
  // create gaps in the source buffer stream. Gaps could break playback for
  // many clients, who don't bother to check ranges after an append.
  EXPECT_FALSE(demuxer_->EvictCodedFrames(
      kSourceId, base::TimeDelta::FromMilliseconds(0), 0));
  CheckExpectedRanges(kSourceId, "{ [0,230) }");

  // Increase media_time a bit. This allows some data to be collected, but we
  // are still over the memory usage limit.
  base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(23 * 2);
  Seek(seek_time1);
  EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time1, 0));
  CheckExpectedRanges(kSourceId, "{ [46,230) }");

  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(23 * 4);
  Seek(seek_time2);
  EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
  CheckExpectedRanges(kSourceId, "{ [92,230) }");

  // media_time has progressed to a point where we can collect enough data to
  // get under the memory limit, so Evict should return true.
  base::TimeDelta seek_time3 = base::TimeDelta::FromMilliseconds(23 * 6);
  Seek(seek_time3);
  EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time3, 0));
  // Strictly speaking the current playback time is 23 * 6 == 138ms, so we
  // could release data up to 138ms, but we only release as much data as
  // necessary to bring memory usage under the limit, so we release only up to
  // 115ms.
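  // The remaining range [115,230) spans exactly five 23ms blocks, which
  // matches the 5 * kBlockSize limit.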
  CheckExpectedRanges(kSourceId, "{ [115,230) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [50,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180 210 240K 270 300 330K");

  // Verify that GOPs that start outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are not included.
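  // Here only the GOPs starting at the 120ms and 240ms keyframes begin inside
  // [50,280), and the 270ms buffer would extend past the window end, so the
  // buffered range stops at 270ms.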
  CheckExpectedRanges(kSourceId, "{ [120,270) }");
  CheckExpectedBuffers(stream, "120K 150 180 210 240K");

  // Extend the append window to [50,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that the added buffers start at the next
  // keyframe.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "360 390 420K 450 480 510 540K 570 600 630K");
  CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the append window to [50,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");

  // Verify that frames that end outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are not included.
  //
  // The first 50ms of the range should be truncated since it overlaps
  // the start of the append window.
  CheckExpectedRanges(kSourceId, "{ [50,280) }");

  // The "50P" buffer is the "0" buffer marked for complete discard. The next
  // "50" buffer is the "30" buffer marked with 20ms of start discard.
  CheckExpectedBuffers(stream, "50KP 50K 60K 90K 120K 150K 180K 210K 240K");

  // Extend the append window to [50,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that a new range is created.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
  CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  // Set the append window to [10,20).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");

  // Verify the append is clipped to the append window.
  CheckExpectedRanges(kSourceId, "{ [10,20) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_,
      CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
      true);
  ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));

  // Set the append window to [50,150).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);

  // Read a WebM file into memory and send the data to the demuxer. The chunk
  // size has been chosen carefully to ensure the preroll buffer used by the
  // partial append window trim must come from a previous Append() call.
  scoped_refptr<DecoderBuffer> buffer =
      ReadTestDataFile("bear-320x240-audio-only.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  AppendDataInPieces(buffer->data(), buffer->data_size(), 128);

  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  CheckExpectedBuffers(stream, "50KP 50K 62K 86K 109K 122K 125K 128K");
}
TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_,
      CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
      true);
  ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));

  // Set the append window such that the first file is completely before the
  // append window.
  // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
  // have the correct duration in their init segments, and the
  // CreateInitDoneCB() call, above, is fixed to use that duration. See
  // http://crbug.com/354284.
  const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
  append_window_start_for_next_append_ = duration_1;

  // Read a WebM file into memory and append the data.
  scoped_refptr<DecoderBuffer> buffer =
      ReadTestDataFile("bear-320x240-audio-only.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
  CheckExpectedRanges(kSourceId, "{ }");

  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  AudioDecoderConfig config_1 = stream->audio_decoder_config();

  // Read a second WebM file with a different config into memory and append
  // the data.
  scoped_refptr<DecoderBuffer> buffer2 =
      ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
  ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
  AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
  CheckExpectedRanges(kSourceId, "{ [2746,5519) }");

  Seek(duration_1);
  ExpectConfigChanged(DemuxerStream::AUDIO);
  ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
  CheckExpectedBuffers(stream, "2746K 2767K 2789K 2810K");
}
TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [20,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append
  // window.
  AppendMuxedCluster(
      MuxedStreamInfo(kVideoTrackNum,
                      "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
      MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K"));

  // Verify that text cues that start outside the window are not included
  // in the buffer. Also verify that cues that extend beyond the
  // window are not included.
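  // Here only the 100ms cue survives (it ends at 200ms, inside the window),
  // and the video GOP starting at the 120ms keyframe contributes [120,270),
  // so the combined buffered range is [100,270).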
  CheckExpectedRanges(kSourceId, "{ [100,270) }");
  CheckExpectedBuffers(video_stream, "120K 150 180 210 240K");
  CheckExpectedBuffers(text_stream, "100K");

  // Extend the append window to [20,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that a new range is created.
  AppendMuxedCluster(
      MuxedStreamInfo(kVideoTrackNum,
                      "360 390 420K 450 480 510 540K 570 600 630K"),
      MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K"));
  CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");

  // Seek to the new range and verify that the expected buffers are returned.
  Seek(base::TimeDelta::FromMilliseconds(420));
  CheckExpectedBuffers(video_stream, "420K 450 480 510 540K 570 600");
  CheckExpectedBuffers(text_stream, "400K 500K");
}
TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendGarbage();
  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
  demuxer_->StartWaitingForSeek(seek_time);
}
TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
      MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
      MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));

  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
  CheckExpectedBuffers(video_stream, "0K 30 60 90 120K 150 180");
  CheckExpectedBuffers(text_stream, "0K 100K 200K");

  // Remove the buffers that were added.
  demuxer_->Remove(kSourceId, base::TimeDelta(),
                   base::TimeDelta::FromMilliseconds(300));

  // Verify that all the appended data has been removed.
  CheckExpectedRanges(kSourceId, "{ }");

  // Append new buffers that are clearly different from the original
  // ones and verify that only the new buffers are returned.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
      MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
      MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));

  Seek(base::TimeDelta());
  CheckExpectedBuffers(audio_stream, "1K 21K 41K 61K 81K 101K 121K 141K");
  CheckExpectedBuffers(video_stream, "1K 31 61 91 121K 151 181");
  CheckExpectedBuffers(text_stream, "1K 101K 201K");
}
TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the duration to something small so that the append that
  // follows updates the duration to reflect the end of the appended data.
  EXPECT_CALL(host_, SetDuration(
      base::TimeDelta::FromMilliseconds(1)));
  demuxer_->SetDuration(0.001);
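
  // The cluster appended below ends at 140ms plus one 20ms block duration,
  // so the duration is updated to 160ms.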
  EXPECT_CALL(host_, SetDuration(
      base::TimeDelta::FromMilliseconds(160)));
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "0K 20K 40K 60K 80K 100K 120K 140K");

  CheckExpectedRanges(kSourceId, "{ [0,160) }");
  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
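
  // Removing a range that starts at the current duration should leave the
  // buffered data untouched.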
  demuxer_->Remove(kSourceId,
                   base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
                   kInfiniteDuration());

  Seek(base::TimeDelta());
  CheckExpectedRanges(kSourceId, "{ [0,160) }");
  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
}
// Verifies that a Seek() will complete without text cues for
// the seek point and will return cues after the seek position
// when they are eventually appended.
TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
  bool seek_cb_was_called = false;
  demuxer_->StartWaitingForSeek(seek_time);
  demuxer_->Seek(seek_time,
                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
  message_loop_.RunUntilIdle();

  EXPECT_FALSE(seek_cb_was_called);

  bool text_read_done = false;
  text_stream->Read(base::Bind(&OnReadDone,
                               base::TimeDelta::FromMilliseconds(225),
                               &text_read_done));

  // Append audio & video data so the seek completes.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum,
                      "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
      MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(seek_cb_was_called);
  EXPECT_FALSE(text_read_done);

  // Read some audio & video buffers to further verify seek completion.
  CheckExpectedBuffers(audio_stream, "120K 140K");
  CheckExpectedBuffers(video_stream, "120K 150");

  EXPECT_FALSE(text_read_done);

  // Append text cues that start after the seek point and verify that
  // they are returned by Read() calls.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
      MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
      MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(text_read_done);

  // NOTE: we start at 275 here because the buffer at 225 was returned
  // to the pending read initiated above.
  CheckExpectedBuffers(text_stream, "275K 325K");

  // Verify that audio & video streams continue to return expected values.
  CheckExpectedBuffers(audio_stream, "160K 180K");
  CheckExpectedBuffers(video_stream, "180 210");
}
TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(GenerateCluster(0, 0, 4, true));
  CheckExpectedRanges(kSourceId, "{ [0,46) }");

  // A new cluster indicates the end of the previous cluster with unknown size.
  AppendCluster(GenerateCluster(46, 66, 5, true));
  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}
TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Add two clusters separated by Cues in a single Append() call.
  scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
  std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
  data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
  cluster = GenerateCluster(46, 66, 5, true);
  data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
  AppendData(&*data.begin(), data.size());

  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}
TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(GenerateCluster(0, 0, 4));
  AppendData(kCuesHeader, sizeof(kCuesHeader));
  AppendCluster(GenerateCluster(46, 66, 5));
  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}
TEST_F(ChunkDemuxerTest, EvictCodedFramesTest) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
  demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 15 * kBlockSize);
  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  const char* kAudioStreamInfo = "0K 40K 80K 120K 160K 200K 240K 280K";
  const char* kVideoStreamInfo = "0K 10 20K 30 40K 50 60K 70 80K 90 100K "
                                 "110 120K 130 140K";
  // Append 8 blocks (80 bytes) of data to the audio stream and 15 blocks (150
  // bytes) to the video stream.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, kAudioStreamInfo),
      MuxedStreamInfo(kVideoTrackNum, kVideoStreamInfo));
  CheckExpectedBuffers(audio_stream, kAudioStreamInfo);
  CheckExpectedBuffers(video_stream, kVideoStreamInfo);

  // If we want to append 80 more bytes of muxed a+v data and the current
  // position is 0, the eviction will fail, because EvictCodedFrames won't
  // remove data after the current playback position.
  ASSERT_FALSE(demuxer_->EvictCodedFrames(
      kSourceId, base::TimeDelta::FromMilliseconds(0), 80));
  // EvictCodedFrames has failed, so the data should be unchanged.
  Seek(base::TimeDelta::FromMilliseconds(0));
  CheckExpectedBuffers(audio_stream, kAudioStreamInfo);
  CheckExpectedBuffers(video_stream, kVideoStreamInfo);

  // But if we pretend that the playback position has moved to 120ms, that
  // allows EvictCodedFrames to garbage-collect enough data to succeed.
  ASSERT_TRUE(demuxer_->EvictCodedFrames(
      kSourceId, base::TimeDelta::FromMilliseconds(120), 80));

  Seek(base::TimeDelta::FromMilliseconds(0));
  // The audio stream had 8 buffers and the video stream had 15. We told
  // EvictCodedFrames that the new data size is 8 blocks muxed, i.e. 80 bytes.
  // Given the current ratio of video to the total data size
  // (15 : (8+15) ~= 0.65), the estimated sizes of video and audio data in the
  // new 80-byte chunk are 52 bytes for video (80*0.65 = 52) and 28 bytes for
  // audio (80 - 52).
  // Given these numbers, MSE GC will remove just one audio block: the current
  // audio size is 80 bytes and the new data is 28 bytes, so removing a single
  // 10-byte block keeps us under the 100-byte memory limit after the append
  // (80 - 10 + 28 = 98).
  // For the video stream, 150 + 52 = 202 and the video limit is 150 bytes, so
  // at least 6 blocks (60 bytes) must be removed to stay under the limit.
  CheckExpectedBuffers(audio_stream, "40K 80K 120K 160K 200K 240K 280K");
  CheckExpectedBuffers(video_stream, "60K 70 80K 90 100K 110 120K 130 140K");
}

}  // namespace media