media/filters/chunk_demuxer_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <algorithm>
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/media_log.h"
16 #include "media/base/mock_demuxer_host.h"
17 #include "media/base/test_data_util.h"
18 #include "media/base/test_helpers.h"
19 #include "media/filters/chunk_demuxer.h"
20 #include "media/formats/webm/cluster_builder.h"
21 #include "media/formats/webm/webm_constants.h"
22 #include "testing/gtest/include/gtest/gtest.h"
24 using ::testing::AnyNumber;
25 using ::testing::Exactly;
26 using ::testing::InSequence;
27 using ::testing::NotNull;
28 using ::testing::Return;
29 using ::testing::SaveArg;
30 using ::testing::SetArgumentPointee;
31 using ::testing::_;
33 namespace media {
35 const uint8 kTracksHeader[] = {
36 0x16, 0x54, 0xAE, 0x6B, // Tracks ID
37 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
40 // WebM Block bytes that represent a VP8 key frame.
41 const uint8 kVP8Keyframe[] = {
42 0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
45 // WebM Block bytes that represent a VP8 interframe.
46 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
48 const uint8 kCuesHeader[] = {
49 0x1C, 0x53, 0xBB, 0x6B, // Cues ID
50 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
53 const uint8 kEncryptedMediaInitData[] = {
54 0x68, 0xFE, 0xF9, 0xA1, 0xB3, 0x0D, 0x6B, 0x4D,
55 0xF2, 0x22, 0xB5, 0x0B, 0x4D, 0xE9, 0xE9, 0x95,
58 const int kTracksHeaderSize = sizeof(kTracksHeader);
59 const int kTracksSizeOffset = 4;
61 // The size field of the TrackEntry element in the test file
62 // "webm_vorbis_track_entry" starts at index 1 and spans 8 bytes.
63 const int kAudioTrackSizeOffset = 1;
64 const int kAudioTrackSizeWidth = 8;
65 const int kAudioTrackEntryHeaderSize =
66 kAudioTrackSizeOffset + kAudioTrackSizeWidth;
68 // The size field of the TrackEntry element in the test file
69 // "webm_vp8_track_entry" starts at index 1 and spans 8 bytes.
70 const int kVideoTrackSizeOffset = 1;
71 const int kVideoTrackSizeWidth = 8;
72 const int kVideoTrackEntryHeaderSize =
73 kVideoTrackSizeOffset + kVideoTrackSizeWidth;
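// In both cases the TrackEntry header is therefore 9 bytes: a 1-byte
// TrackEntry ID (0xAE) followed by the 8-byte size field, which is what the
// k*TrackEntryHeaderSize constants above add up to.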
75 const int kVideoTrackNum = 1;
76 const int kAudioTrackNum = 2;
77 const int kTextTrackNum = 3;
78 const int kAlternateTextTrackNum = 4;
80 const int kAudioBlockDuration = 23;
81 const int kVideoBlockDuration = 33;
82 const int kTextBlockDuration = 100;
83 const int kBlockSize = 10;
85 const char kSourceId[] = "SourceId";
86 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
87 const int kDefaultFirstClusterEndTimestamp = 66;
88 const int kDefaultSecondClusterEndTimestamp = 132;
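// These values follow from the clusters generated by kDefaultFirstCluster()
// and kDefaultSecondCluster() below: the first cluster covers audio [0,46)
// (two 23ms blocks) and video [0,66) (blocks at 0 and 33), so its buffered
// range is { [0,46) } and its last (video) block ends at 66; the second
// cluster's last video block starts at 99 and ends at 99 + 33 = 132.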
90 base::TimeDelta kDefaultDuration() {
91 return base::TimeDelta::FromMilliseconds(201224);
94 // Writes an integer into |buffer| as a vint (variable-length integer) that
95 // spans 8 bytes. The data pointed to by |buffer| must be at least 8 bytes
96 // long. |number| must be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
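// For example, WriteInt64(buffer, 0x0102) produces the bytes
// 01 00 00 00 00 00 01 02: a 0x01 length descriptor followed by the value in
// big-endian order across the remaining 7 bytes.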
97 static void WriteInt64(uint8* buffer, int64 number) {
98 DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
99 buffer[0] = 0x01;
100 int64 tmp = number;
101 for (int i = 7; i > 0; i--) {
102 buffer[i] = tmp & 0xff;
103 tmp >>= 8;
107 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
108 return arg.get() && !arg->end_of_stream() &&
109 arg->timestamp().InMilliseconds() == timestamp_in_ms;
112 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
114 static void OnReadDone(const base::TimeDelta& expected_time,
115 bool* called,
116 DemuxerStream::Status status,
117 const scoped_refptr<DecoderBuffer>& buffer) {
118 EXPECT_EQ(status, DemuxerStream::kOk);
119 EXPECT_EQ(expected_time, buffer->timestamp());
120 *called = true;
123 static void OnReadDone_AbortExpected(
124 bool* called, DemuxerStream::Status status,
125 const scoped_refptr<DecoderBuffer>& buffer) {
126 EXPECT_EQ(status, DemuxerStream::kAborted);
127 EXPECT_EQ(NULL, buffer.get());
128 *called = true;
131 static void OnReadDone_EOSExpected(bool* called,
132 DemuxerStream::Status status,
133 const scoped_refptr<DecoderBuffer>& buffer) {
134 EXPECT_EQ(status, DemuxerStream::kOk);
135 EXPECT_TRUE(buffer->end_of_stream());
136 *called = true;
139 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
140 EXPECT_EQ(status, PIPELINE_OK);
141 *called = true;
144 class ChunkDemuxerTest : public ::testing::Test {
145 protected:
146 enum CodecsIndex {
147 AUDIO,
148 VIDEO,
149 MAX_CODECS_INDEX
152 // Default cluster to append first for simple tests.
153 scoped_ptr<Cluster> kDefaultFirstCluster() {
154 return GenerateCluster(0, 4);
157 // Default cluster to append after kDefaultFirstCluster()
158 // has been appended. This cluster starts with blocks that
159 // have timestamps consistent with the end times of the blocks
160 // in kDefaultFirstCluster() so that these two clusters represent
161 // a continuous region.
162 scoped_ptr<Cluster> kDefaultSecondCluster() {
163 return GenerateCluster(46, 66, 5);
166 ChunkDemuxerTest()
167 : append_window_end_for_next_append_(kInfiniteDuration()) {
168 init_segment_received_cb_ =
169 base::Bind(&ChunkDemuxerTest::InitSegmentReceived,
170 base::Unretained(this));
171 CreateNewDemuxer();
174 void CreateNewDemuxer() {
175 base::Closure open_cb =
176 base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
177 Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb = base::Bind(
178 &ChunkDemuxerTest::OnEncryptedMediaInitData, base::Unretained(this));
179 demuxer_.reset(new ChunkDemuxer(open_cb, encrypted_media_init_data_cb,
180 scoped_refptr<MediaLog>(new MediaLog()),
181 true));
184 virtual ~ChunkDemuxerTest() {
185 ShutdownDemuxer();
188 void CreateInitSegment(int stream_flags,
189 bool is_audio_encrypted,
190 bool is_video_encrypted,
191 scoped_ptr<uint8[]>* buffer,
192 int* size) {
193 CreateInitSegmentInternal(
194 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
195 size);
198 void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
199 bool is_audio_encrypted,
200 bool is_video_encrypted,
201 scoped_ptr<uint8[]>* buffer,
202 int* size) {
203 DCHECK(stream_flags & HAS_TEXT);
204 CreateInitSegmentInternal(
205 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
206 size);
209 void CreateInitSegmentInternal(int stream_flags,
210 bool is_audio_encrypted,
211 bool is_video_encrypted,
212 scoped_ptr<uint8[]>* buffer,
213 bool use_alternate_text_track_id,
214 int* size) {
215 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
216 bool has_video = (stream_flags & HAS_VIDEO) != 0;
217 bool has_text = (stream_flags & HAS_TEXT) != 0;
218 scoped_refptr<DecoderBuffer> ebml_header;
219 scoped_refptr<DecoderBuffer> info;
220 scoped_refptr<DecoderBuffer> audio_track_entry;
221 scoped_refptr<DecoderBuffer> video_track_entry;
222 scoped_refptr<DecoderBuffer> audio_content_encodings;
223 scoped_refptr<DecoderBuffer> video_content_encodings;
224 scoped_refptr<DecoderBuffer> text_track_entry;
226 ebml_header = ReadTestDataFile("webm_ebml_element");
228 info = ReadTestDataFile("webm_info_element");
230 int tracks_element_size = 0;
232 if (has_audio) {
233 audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
234 tracks_element_size += audio_track_entry->data_size();
235 if (is_audio_encrypted) {
236 audio_content_encodings = ReadTestDataFile("webm_content_encodings");
237 tracks_element_size += audio_content_encodings->data_size();
241 if (has_video) {
242 video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
243 tracks_element_size += video_track_entry->data_size();
244 if (is_video_encrypted) {
245 video_content_encodings = ReadTestDataFile("webm_content_encodings");
246 tracks_element_size += video_content_encodings->data_size();
250 if (has_text) {
251 // TODO(matthewjheaney): create an abstraction to do
252 // this (http://crbug/321454).
253 // We need it to also handle the creation of multiple text tracks.
255 // This is the track entry for a text track,
256 // TrackEntry [AE], size=30
257 // TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
258 // TrackUID [73] [C5], size=1, value=3 (must remain constant for same
259 // track, even if TrackNum changes)
260 // TrackType [83], size=1, val=0x11
261 // CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
262 char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
263 "\x83\x81\x11\x86\x92"
264 "D_WEBVTT/SUBTITLES";
265 DCHECK_EQ(str[4], kTextTrackNum);
266 if (use_alternate_text_track_id)
267 str[4] = kAlternateTextTrackNum;
269 const int len = strlen(str);
270 DCHECK_EQ(len, 32);
271 const uint8* const buf = reinterpret_cast<const uint8*>(str);
272 text_track_entry = DecoderBuffer::CopyFrom(buf, len);
273 tracks_element_size += text_track_entry->data_size();
276 *size = ebml_header->data_size() + info->data_size() +
277 kTracksHeaderSize + tracks_element_size;
279 buffer->reset(new uint8[*size]);
281 uint8* buf = buffer->get();
282 memcpy(buf, ebml_header->data(), ebml_header->data_size());
283 buf += ebml_header->data_size();
285 memcpy(buf, info->data(), info->data_size());
286 buf += info->data_size();
288 memcpy(buf, kTracksHeader, kTracksHeaderSize);
289 WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
290 buf += kTracksHeaderSize;
292 // TODO(xhwang): Simplify this! Probably have test data files that contain
293 // ContentEncodings directly instead of trying to create one at run-time.
294 if (has_audio) {
295 memcpy(buf, audio_track_entry->data(),
296 audio_track_entry->data_size());
297 if (is_audio_encrypted) {
298 memcpy(buf + audio_track_entry->data_size(),
299 audio_content_encodings->data(),
300 audio_content_encodings->data_size());
301 WriteInt64(buf + kAudioTrackSizeOffset,
302 audio_track_entry->data_size() +
303 audio_content_encodings->data_size() -
304 kAudioTrackEntryHeaderSize);
305 buf += audio_content_encodings->data_size();
307 buf += audio_track_entry->data_size();
310 if (has_video) {
311 memcpy(buf, video_track_entry->data(),
312 video_track_entry->data_size());
313 if (is_video_encrypted) {
314 memcpy(buf + video_track_entry->data_size(),
315 video_content_encodings->data(),
316 video_content_encodings->data_size());
317 WriteInt64(buf + kVideoTrackSizeOffset,
318 video_track_entry->data_size() +
319 video_content_encodings->data_size() -
320 kVideoTrackEntryHeaderSize);
321 buf += video_content_encodings->data_size();
323 buf += video_track_entry->data_size();
326 if (has_text) {
327 memcpy(buf, text_track_entry->data(),
328 text_track_entry->data_size());
329 buf += text_track_entry->data_size();
333 ChunkDemuxer::Status AddId() {
334 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
337 ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
338 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
339 bool has_video = (stream_flags & HAS_VIDEO) != 0;
340 std::vector<std::string> codecs;
341 std::string type;
343 if (has_audio) {
344 codecs.push_back("vorbis");
345 type = "audio/webm";
348 if (has_video) {
349 codecs.push_back("vp8");
350 type = "video/webm";
353 if (!has_audio && !has_video) {
354 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
357 return demuxer_->AddId(source_id, type, codecs);
360 ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
361 std::vector<std::string> codecs;
362 std::string type = "video/mp2t";
363 codecs.push_back("mp4a.40.2");
364 codecs.push_back("avc1.640028");
365 return demuxer_->AddId(source_id, type, codecs);
368 void AppendData(const uint8* data, size_t length) {
369 AppendData(kSourceId, data, length);
372 void AppendCluster(const std::string& source_id,
373 scoped_ptr<Cluster> cluster) {
374 AppendData(source_id, cluster->data(), cluster->size());
377 void AppendCluster(scoped_ptr<Cluster> cluster) {
378 AppendCluster(kSourceId, cluster.Pass());
381 void AppendCluster(int timecode, int block_count) {
382 AppendCluster(GenerateCluster(timecode, block_count));
385 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
386 int timecode, int block_count) {
387 int block_duration = 0;
388 switch (track_number) {
389 case kVideoTrackNum:
390 block_duration = kVideoBlockDuration;
391 break;
392 case kAudioTrackNum:
393 block_duration = kAudioBlockDuration;
394 break;
395 case kTextTrackNum: // Fall-through.
396 case kAlternateTextTrackNum:
397 block_duration = kTextBlockDuration;
398 break;
400 ASSERT_NE(block_duration, 0);
401 int end_timecode = timecode + block_count * block_duration;
402 AppendCluster(source_id,
403 GenerateSingleStreamCluster(
404 timecode, end_timecode, track_number, block_duration));
407 struct BlockInfo {
408 BlockInfo()
409 : track_number(0),
410 timestamp_in_ms(0),
411 flags(0),
412 duration(0) {
415 BlockInfo(int tn, int ts, int f, int d)
416 : track_number(tn),
417 timestamp_in_ms(ts),
418 flags(f),
419 duration(d) {
422 int track_number;
423 int timestamp_in_ms;
424 int flags;
425 int duration;
427 bool operator< (const BlockInfo& rhs) const {
428 return timestamp_in_ms < rhs.timestamp_in_ms;
432 // |track_number| - The track number to assign to each block added to |blocks|.
433 // |block_descriptions| - A space delimited string of block info that
434 // is used to populate |blocks|. Each block info has a timestamp in
435 // milliseconds and optionally followed by a 'K' to indicate that a block
436 // should be marked as a key frame. For example "0K 30 60" should populate
437 // |blocks| with 3 BlockInfo objects: a key frame with timestamp 0 and 2
438 // non-key-frames at 30ms and 60ms.
439 void ParseBlockDescriptions(int track_number,
440 const std::string block_descriptions,
441 std::vector<BlockInfo>* blocks) {
442 std::vector<std::string> timestamps = base::SplitString(
443 block_descriptions, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
445 for (size_t i = 0; i < timestamps.size(); ++i) {
446 std::string timestamp_str = timestamps[i];
447 BlockInfo block_info;
448 block_info.track_number = track_number;
449 block_info.flags = 0;
450 block_info.duration = 0;
452 if (base::EndsWith(timestamp_str, "K", base::CompareCase::SENSITIVE)) {
453 block_info.flags = kWebMFlagKeyframe;
454 // Remove the "K" off of the token.
455 timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
457 CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
459 if (track_number == kTextTrackNum ||
460 track_number == kAlternateTextTrackNum) {
461 block_info.duration = kTextBlockDuration;
462 ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
463 << "Text block with timestamp " << block_info.timestamp_in_ms
464 << " was not marked as a key frame."
465 << " All text blocks must be key frames";
468 if (track_number == kAudioTrackNum)
469 ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
471 blocks->push_back(block_info);
475 scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
476 bool unknown_size) {
477 DCHECK_GT(blocks.size(), 0u);
478 ClusterBuilder cb;
480 std::vector<uint8> data(10);
481 for (size_t i = 0; i < blocks.size(); ++i) {
482 if (i == 0)
483 cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
485 if (blocks[i].duration) {
486 if (blocks[i].track_number == kVideoTrackNum) {
487 AddVideoBlockGroup(&cb,
488 blocks[i].track_number, blocks[i].timestamp_in_ms,
489 blocks[i].duration, blocks[i].flags);
490 } else {
491 cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
492 blocks[i].duration, blocks[i].flags,
493 &data[0], data.size());
495 } else {
496 cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
497 blocks[i].flags,
498 &data[0], data.size());
502 return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
505 scoped_ptr<Cluster> GenerateCluster(
506 std::priority_queue<BlockInfo> block_queue,
507 bool unknown_size) {
508 std::vector<BlockInfo> blocks(block_queue.size());
509 for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
510 blocks[i] = block_queue.top();
511 block_queue.pop();
514 return GenerateCluster(blocks, unknown_size);
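// Note: std::priority_queue pops the largest timestamp first, so the loop
// above fills |blocks| from back to front, yielding a timestamp-ordered
// vector.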
517 // |block_descriptions| - The block descriptions used to construct the
518 // cluster. See the documentation for ParseBlockDescriptions() for details on
519 // the string format.
520 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
521 const std::string& block_descriptions) {
522 std::vector<BlockInfo> blocks;
523 ParseBlockDescriptions(track_number, block_descriptions, &blocks);
524 AppendCluster(source_id, GenerateCluster(blocks, false));
527 struct MuxedStreamInfo {
528 MuxedStreamInfo()
529 : track_number(0),
530 block_descriptions("")
533 MuxedStreamInfo(int track_num, const char* block_desc)
534 : track_number(track_num),
535 block_descriptions(block_desc) {
538 int track_number;
539 // The block description passed to ParseBlockDescriptions().
540 // See the documentation for that method for details on the string format.
541 const char* block_descriptions;
544 void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
545 const MuxedStreamInfo& msi_2) {
546 std::vector<MuxedStreamInfo> msi(2);
547 msi[0] = msi_1;
548 msi[1] = msi_2;
549 AppendMuxedCluster(msi);
552 void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
553 const MuxedStreamInfo& msi_2,
554 const MuxedStreamInfo& msi_3) {
555 std::vector<MuxedStreamInfo> msi(3);
556 msi[0] = msi_1;
557 msi[1] = msi_2;
558 msi[2] = msi_3;
559 AppendMuxedCluster(msi);
562 void AppendMuxedCluster(const std::vector<MuxedStreamInfo> msi) {
563 std::priority_queue<BlockInfo> block_queue;
564 for (size_t i = 0; i < msi.size(); ++i) {
565 std::vector<BlockInfo> track_blocks;
566 ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
567 &track_blocks);
569 for (size_t j = 0; j < track_blocks.size(); ++j)
570 block_queue.push(track_blocks[j]);
573 AppendCluster(kSourceId, GenerateCluster(block_queue, false));
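// For example (mirroring the tests below),
//   AppendMuxedCluster(MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
//                      MuxedStreamInfo(kVideoTrackNum, "0K 30"));
// interleaves the audio and video blocks into a single cluster, ordered by
// timestamp via the priority queue above.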
576 void AppendData(const std::string& source_id,
577 const uint8* data, size_t length) {
578 EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
580 demuxer_->AppendData(source_id, data, length,
581 append_window_start_for_next_append_,
582 append_window_end_for_next_append_,
583 &timestamp_offset_map_[source_id],
584 init_segment_received_cb_);
587 void AppendDataInPieces(const uint8* data, size_t length) {
588 AppendDataInPieces(data, length, 7);
591 void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
592 const uint8* start = data;
593 const uint8* end = data + length;
594 while (start < end) {
595 size_t append_size = std::min(piece_size,
596 static_cast<size_t>(end - start));
597 AppendData(start, append_size);
598 start += append_size;
602 void AppendInitSegment(int stream_flags) {
603 AppendInitSegmentWithSourceId(kSourceId, stream_flags);
606 void AppendInitSegmentWithSourceId(const std::string& source_id,
607 int stream_flags) {
608 AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
611 void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
612 int stream_flags,
613 bool is_audio_encrypted,
614 bool is_video_encrypted) {
615 scoped_ptr<uint8[]> info_tracks;
616 int info_tracks_size = 0;
617 CreateInitSegment(stream_flags,
618 is_audio_encrypted, is_video_encrypted,
619 &info_tracks, &info_tracks_size);
620 AppendData(source_id, info_tracks.get(), info_tracks_size);
623 void AppendGarbage() {
624 // Fill up an array with gibberish.
625 int garbage_cluster_size = 10;
626 scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
627 for (int i = 0; i < garbage_cluster_size; ++i)
628 garbage_cluster[i] = i;
629 AppendData(garbage_cluster.get(), garbage_cluster_size);
632 void InitDoneCalled(PipelineStatus expected_status,
633 PipelineStatus status) {
634 EXPECT_EQ(status, expected_status);
637 void AppendEmptyCluster(int timecode) {
638 AppendCluster(GenerateEmptyCluster(timecode));
641 PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
642 PipelineStatus expected_status) {
643 if (expected_duration != kNoTimestamp())
644 EXPECT_CALL(host_, SetDuration(expected_duration));
645 return CreateInitDoneCB(expected_status);
648 PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
649 return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
650 base::Unretained(this),
651 expected_status);
654 enum StreamFlags {
655 HAS_AUDIO = 1 << 0,
656 HAS_VIDEO = 1 << 1,
657 HAS_TEXT = 1 << 2
660 bool InitDemuxer(int stream_flags) {
661 return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
664 bool InitDemuxerWithEncryptionInfo(
665 int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
667 PipelineStatus expected_status =
668 (stream_flags != 0) ? PIPELINE_OK : PIPELINE_ERROR_DECODE;
670 base::TimeDelta expected_duration = kNoTimestamp();
671 if (expected_status == PIPELINE_OK)
672 expected_duration = kDefaultDuration();
674 EXPECT_CALL(*this, DemuxerOpened());
676 // Adding expectation prior to CreateInitDoneCB() here because InSequence
677 // tests require init segment received before duration set. Also, only
678 // expect an init segment received callback if there is actually a track in
679 // it.
680 if (stream_flags != 0)
681 EXPECT_CALL(*this, InitSegmentReceived());
683 demuxer_->Initialize(
684 &host_, CreateInitDoneCB(expected_duration, expected_status), true);
686 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
687 return false;
689 AppendInitSegmentWithEncryptedInfo(
690 kSourceId, stream_flags,
691 is_audio_encrypted, is_video_encrypted);
692 return true;
695 bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
696 const std::string& video_id,
697 bool has_text) {
698 EXPECT_CALL(*this, DemuxerOpened());
699 demuxer_->Initialize(
700 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
702 if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
703 return false;
704 if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
705 return false;
707 int audio_flags = HAS_AUDIO;
708 int video_flags = HAS_VIDEO;
710 if (has_text) {
711 audio_flags |= HAS_TEXT;
712 video_flags |= HAS_TEXT;
715 EXPECT_CALL(*this, InitSegmentReceived());
716 AppendInitSegmentWithSourceId(audio_id, audio_flags);
717 EXPECT_CALL(*this, InitSegmentReceived());
718 AppendInitSegmentWithSourceId(video_id, video_flags);
719 return true;
722 bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
723 const std::string& video_id) {
724 return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
727 // Initializes the demuxer with data from 2 files with different
728 // decoder configurations. This is used to test the decoder config change
729 // logic.
731 // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
732 // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
733 // The resulting video stream returns data from each file for the following
734 // time ranges.
735 // bear-320x240.webm : [0-501) [801-2736)
736 // bear-640x360.webm : [527-793)
738 // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
739 // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
740 // The resulting audio stream returns data from each file for the following
741 // time ranges.
742 // bear-320x240.webm : [0-524) [779-2736)
743 // bear-640x360.webm : [527-759)
744 bool InitDemuxerWithConfigChangeData() {
745 scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
746 scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
748 EXPECT_CALL(*this, DemuxerOpened());
750 // Adding expectation prior to CreateInitDoneCB() here because InSequence
751 // tests require init segment received before duration set.
752 EXPECT_CALL(*this, InitSegmentReceived());
753 demuxer_->Initialize(
754 &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
755 PIPELINE_OK), true);
757 if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
758 return false;
760 // Append the whole bear1 file.
761 // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
762 // the files are fixed to have the correct duration in their init segments,
763 // and the CreateInitDoneCB() call, above, is fixed to used that duration.
764 // See http://crbug.com/354284.
765 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
766 AppendData(bear1->data(), bear1->data_size());
767 // Last audio frame has timestamp 2721 and duration 24 (estimated from max
768 // seen so far for audio track).
769 // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
770 // DefaultDuration for video track).
771 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
773 // Append initialization segment for bear2.
774 // Note: Offsets here and below are derived from
775 // media/test/data/bear-640x360-manifest.js and
776 // media/test/data/bear-320x240-manifest.js which were
777 // generated from media/test/data/bear-640x360.webm and
778 // media/test/data/bear-320x240.webm respectively.
779 EXPECT_CALL(*this, InitSegmentReceived());
780 AppendData(bear2->data(), 4340);
782 // Append a media segment that goes from [0.527000, 1.014000).
783 AppendData(bear2->data() + 55290, 18785);
784 CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
786 // Append initialization segment for bear1 & fill gap with [779-1197)
787 // segment.
788 EXPECT_CALL(*this, InitSegmentReceived());
789 AppendData(bear1->data(), 4370);
790 AppendData(bear1->data() + 72737, 28183);
791 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
793 MarkEndOfStream(PIPELINE_OK);
794 return true;
797 void ShutdownDemuxer() {
798 if (demuxer_) {
799 demuxer_->Shutdown();
800 message_loop_.RunUntilIdle();
804 void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
805 uint8 data[] = { 0x00 };
806 cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
809 scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
810 return GenerateCluster(timecode, timecode, block_count);
813 void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
814 int duration, int flags) {
815 const uint8* data =
816 (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
817 int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
818 sizeof(kVP8Interframe);
819 cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
822 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
823 int first_video_timecode,
824 int block_count) {
825 return GenerateCluster(first_audio_timecode, first_video_timecode,
826 block_count, false);
828 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
829 int first_video_timecode,
830 int block_count,
831 bool unknown_size) {
832 CHECK_GT(block_count, 0);
834 std::priority_queue<BlockInfo> block_queue;
836 if (block_count == 1) {
837 block_queue.push(BlockInfo(kAudioTrackNum,
838 first_audio_timecode,
839 kWebMFlagKeyframe,
840 kAudioBlockDuration));
841 return GenerateCluster(block_queue, unknown_size);
844 int audio_timecode = first_audio_timecode;
845 int video_timecode = first_video_timecode;
847 // Create simple blocks for everything except the last 2 blocks.
848 // The first video frame must be a key frame.
849 uint8 video_flag = kWebMFlagKeyframe;
850 for (int i = 0; i < block_count - 2; i++) {
851 if (audio_timecode <= video_timecode) {
852 block_queue.push(BlockInfo(kAudioTrackNum,
853 audio_timecode,
854 kWebMFlagKeyframe,
855 0));
856 audio_timecode += kAudioBlockDuration;
857 continue;
860 block_queue.push(BlockInfo(kVideoTrackNum,
861 video_timecode,
862 video_flag,
863 0));
864 video_timecode += kVideoBlockDuration;
865 video_flag = 0;
868 // Make the last 2 blocks BlockGroups so that they don't get delayed by the
869 // block duration calculation logic.
870 block_queue.push(BlockInfo(kAudioTrackNum,
871 audio_timecode,
872 kWebMFlagKeyframe,
873 kAudioBlockDuration));
874 block_queue.push(BlockInfo(kVideoTrackNum,
875 video_timecode,
876 video_flag,
877 kVideoBlockDuration));
879 return GenerateCluster(block_queue, unknown_size);
882 scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
883 int end_timecode,
884 int track_number,
885 int block_duration) {
886 CHECK_GT(end_timecode, timecode);
888 std::vector<uint8> data(kBlockSize);
890 ClusterBuilder cb;
891 cb.SetClusterTimecode(timecode);
893 // Create simple blocks for everything except the last block.
894 while (timecode < (end_timecode - block_duration)) {
895 cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
896 &data[0], data.size());
897 timecode += block_duration;
900 if (track_number == kVideoTrackNum) {
901 AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
902 kWebMFlagKeyframe);
903 } else {
904 cb.AddBlockGroup(track_number, timecode, block_duration,
905 kWebMFlagKeyframe, &data[0], data.size());
908 return cb.Finish();
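// For example, GenerateSingleStreamCluster(0, 46, kAudioTrackNum, 23)
// produces an audio block at 0 and a final BlockGroup at 23 with an explicit
// 23ms duration, covering [0,46).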
911 void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
912 demuxer_->GetStream(type)->Read(read_cb);
913 message_loop_.RunUntilIdle();
916 void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
917 Read(DemuxerStream::AUDIO, read_cb);
920 void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
921 Read(DemuxerStream::VIDEO, read_cb);
924 void GenerateExpectedReads(int timecode, int block_count) {
925 GenerateExpectedReads(timecode, timecode, block_count);
928 void GenerateExpectedReads(int start_audio_timecode,
929 int start_video_timecode,
930 int block_count) {
931 CHECK_GT(block_count, 0);
933 if (block_count == 1) {
934 ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
935 return;
938 int audio_timecode = start_audio_timecode;
939 int video_timecode = start_video_timecode;
941 for (int i = 0; i < block_count; i++) {
942 if (audio_timecode <= video_timecode) {
943 ExpectRead(DemuxerStream::AUDIO, audio_timecode);
944 audio_timecode += kAudioBlockDuration;
945 continue;
948 ExpectRead(DemuxerStream::VIDEO, video_timecode);
949 video_timecode += kVideoBlockDuration;
953 void GenerateSingleStreamExpectedReads(int timecode,
954 int block_count,
955 DemuxerStream::Type type,
956 int block_duration) {
957 CHECK_GT(block_count, 0);
958 int stream_timecode = timecode;
960 for (int i = 0; i < block_count; i++) {
961 ExpectRead(type, stream_timecode);
962 stream_timecode += block_duration;
966 void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
967 GenerateSingleStreamExpectedReads(
968 timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
971 void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
972 GenerateSingleStreamExpectedReads(
973 timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
976 scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
977 ClusterBuilder cb;
978 cb.SetClusterTimecode(timecode);
979 return cb.Finish();
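// The CheckExpectedRanges() helpers below compare buffered ranges against a
// string of the form "{ [0,1027) [1201,2736) }", with start/end times in
// milliseconds.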
982 void CheckExpectedRanges(const std::string& expected) {
983 CheckExpectedRanges(kSourceId, expected);
986 void CheckExpectedRanges(const std::string& id,
987 const std::string& expected) {
988 CheckExpectedRanges(demuxer_->GetBufferedRanges(id), expected);
991 void CheckExpectedRanges(DemuxerStream::Type type,
992 const std::string& expected) {
993 ChunkDemuxerStream* stream =
994 static_cast<ChunkDemuxerStream*>(demuxer_->GetStream(type));
995 CheckExpectedRanges(stream->GetBufferedRanges(kDefaultDuration()),
996 expected);
999 void CheckExpectedRanges(const Ranges<base::TimeDelta>& r,
1000 const std::string& expected) {
1001 std::stringstream ss;
1002 ss << "{ ";
1003 for (size_t i = 0; i < r.size(); ++i) {
1004 ss << "[" << r.start(i).InMilliseconds() << ","
1005 << r.end(i).InMilliseconds() << ") ";
1007 ss << "}";
1008 EXPECT_EQ(expected, ss.str());
1011 MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
1012 const scoped_refptr<DecoderBuffer>&));
1014 void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
1015 scoped_refptr<DecoderBuffer>* buffer_out,
1016 DemuxerStream::Status status,
1017 const scoped_refptr<DecoderBuffer>& buffer) {
1018 *status_out = status;
1019 *buffer_out = buffer;
1022 void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
1023 DemuxerStream::Status* status,
1024 base::TimeDelta* last_timestamp) {
1025 DemuxerStream* stream = demuxer_->GetStream(type);
1026 scoped_refptr<DecoderBuffer> buffer;
1028 *last_timestamp = kNoTimestamp();
1029 do {
1030 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1031 base::Unretained(this), status, &buffer));
1032 base::MessageLoop::current()->RunUntilIdle();
1033 if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
1034 *last_timestamp = buffer->timestamp();
1035 } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
1038 void ExpectEndOfStream(DemuxerStream::Type type) {
1039 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
1040 demuxer_->GetStream(type)->Read(base::Bind(
1041 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1042 message_loop_.RunUntilIdle();
1045 void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
1046 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
1047 HasTimestamp(timestamp_in_ms)));
1048 demuxer_->GetStream(type)->Read(base::Bind(
1049 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1050 message_loop_.RunUntilIdle();
1053 void ExpectConfigChanged(DemuxerStream::Type type) {
1054 EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
1055 demuxer_->GetStream(type)->Read(base::Bind(
1056 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1057 message_loop_.RunUntilIdle();
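// Reads buffers from |stream| until a non-kOk status or end of stream is
// reached and verifies that the buffer timestamps match |expected|: a
// space-delimited list of millisecond values, each optionally followed by
// "K" for a key frame and/or "P" for a preroll buffer, e.g. "0K 30 60K".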
1060 void CheckExpectedBuffers(DemuxerStream* stream,
1061 const std::string& expected) {
1062 std::vector<std::string> timestamps = base::SplitString(
1063 expected, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
1064 std::stringstream ss;
1065 for (size_t i = 0; i < timestamps.size(); ++i) {
1066 // Initialize status to kAborted since it's possible for Read() to return
1067 // without calling StoreStatusAndBuffer() if it doesn't have any buffers
1068 // left to return.
1069 DemuxerStream::Status status = DemuxerStream::kAborted;
1070 scoped_refptr<DecoderBuffer> buffer;
1071 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1072 base::Unretained(this), &status, &buffer));
1073 base::MessageLoop::current()->RunUntilIdle();
1074 if (status != DemuxerStream::kOk || buffer->end_of_stream())
1075 break;
1077 if (i > 0)
1078 ss << " ";
1079 ss << buffer->timestamp().InMilliseconds();
1081 if (buffer->is_key_frame())
1082 ss << "K";
1084 // Handle preroll buffers.
1085 if (base::EndsWith(timestamps[i], "P", base::CompareCase::SENSITIVE)) {
1086 ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
1087 ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
1088 ss << "P";
1091 EXPECT_EQ(expected, ss.str());
1094 MOCK_METHOD1(Checkpoint, void(int id));
1096 struct BufferTimestamps {
1097 int video_time_ms;
1098 int audio_time_ms;
1100 static const int kSkip = -1;
1102 // Test parsing a WebM file.
1103 // |filename| - The name of the file in media/test/data to parse.
1104 // |timestamps| - The expected timestamps on the parsed buffers.
1105 // A timestamp of kSkip indicates that a Read() call for that stream
1106 // shouldn't be made on that iteration of the loop. If both streams have
1107 // a kSkip then the loop will terminate.
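// For example (illustrative values), a |timestamps| table of
//   { {0, 0}, {33, 23}, {kSkip, kSkip} }
// checks the first two video/audio buffer timestamps and then terminates.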
1108 bool ParseWebMFile(const std::string& filename,
1109 const BufferTimestamps* timestamps,
1110 const base::TimeDelta& duration) {
1111 return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
1114 bool ParseWebMFile(const std::string& filename,
1115 const BufferTimestamps* timestamps,
1116 const base::TimeDelta& duration,
1117 int stream_flags) {
1118 EXPECT_CALL(*this, DemuxerOpened());
1119 demuxer_->Initialize(
1120 &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
1122 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
1123 return false;
1125 // Read a WebM file into memory and send the data to the demuxer.
1126 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
1127 EXPECT_CALL(*this, InitSegmentReceived());
1128 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
1130 // Verify that the timestamps on the first few packets match what we
1131 // expect.
1132 for (size_t i = 0;
1133 (timestamps[i].audio_time_ms != kSkip ||
1134 timestamps[i].video_time_ms != kSkip);
1135 i++) {
1136 bool audio_read_done = false;
1137 bool video_read_done = false;
1139 if (timestamps[i].audio_time_ms != kSkip) {
1140 ReadAudio(base::Bind(&OnReadDone,
1141 base::TimeDelta::FromMilliseconds(
1142 timestamps[i].audio_time_ms),
1143 &audio_read_done));
1144 EXPECT_TRUE(audio_read_done);
1147 if (timestamps[i].video_time_ms != kSkip) {
1148 ReadVideo(base::Bind(&OnReadDone,
1149 base::TimeDelta::FromMilliseconds(
1150 timestamps[i].video_time_ms),
1151 &video_read_done));
1152 EXPECT_TRUE(video_read_done);
1156 return true;
1159 MOCK_METHOD0(DemuxerOpened, void());
1160 MOCK_METHOD2(OnEncryptedMediaInitData,
1161 void(EmeInitDataType init_data_type,
1162 const std::vector<uint8>& init_data));
1164 MOCK_METHOD0(InitSegmentReceived, void(void));
1166 void Seek(base::TimeDelta seek_time) {
1167 demuxer_->StartWaitingForSeek(seek_time);
1168 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
1169 message_loop_.RunUntilIdle();
1172 void MarkEndOfStream(PipelineStatus status) {
1173 demuxer_->MarkEndOfStream(status);
1174 message_loop_.RunUntilIdle();
1177 bool SetTimestampOffset(const std::string& id,
1178 base::TimeDelta timestamp_offset) {
1179 if (demuxer_->IsParsingMediaSegment(id))
1180 return false;
1182 timestamp_offset_map_[id] = timestamp_offset;
1183 return true;
1186 base::MessageLoop message_loop_;
1187 MockDemuxerHost host_;
1189 scoped_ptr<ChunkDemuxer> demuxer_;
1190 ChunkDemuxer::InitSegmentReceivedCB init_segment_received_cb_;
1192 base::TimeDelta append_window_start_for_next_append_;
1193 base::TimeDelta append_window_end_for_next_append_;
1195 // Map of source id to timestamp offset to use for the next AppendData()
1196 // operation for that source id.
1197 std::map<std::string, base::TimeDelta> timestamp_offset_map_;
1199 private:
1200 DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
1203 TEST_F(ChunkDemuxerTest, Init) {
1204 // Test no streams, audio-only, video-only, and audio & video scenarios.
1205 // Audio and video streams can be encrypted or not encrypted.
1206 for (int i = 0; i < 16; i++) {
1207 bool has_audio = (i & 0x1) != 0;
1208 bool has_video = (i & 0x2) != 0;
1209 bool is_audio_encrypted = (i & 0x4) != 0;
1210 bool is_video_encrypted = (i & 0x8) != 0;
1212 // Skip invalid combinations (a stream marked encrypted but not present).
1213 if ((!has_audio && is_audio_encrypted) ||
1214 (!has_video && is_video_encrypted)) {
1215 continue;
1218 CreateNewDemuxer();
1220 if (is_audio_encrypted || is_video_encrypted) {
1221 int need_key_count = (is_audio_encrypted ? 1 : 0) +
1222 (is_video_encrypted ? 1 : 0);
1223 EXPECT_CALL(*this, OnEncryptedMediaInitData(
1224 EmeInitDataType::WEBM,
1225 std::vector<uint8>(
1226 kEncryptedMediaInitData,
1227 kEncryptedMediaInitData +
1228 arraysize(kEncryptedMediaInitData))))
1229 .Times(Exactly(need_key_count));
1232 int stream_flags = 0;
1233 if (has_audio)
1234 stream_flags |= HAS_AUDIO;
1236 if (has_video)
1237 stream_flags |= HAS_VIDEO;
1239 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1240 stream_flags, is_audio_encrypted, is_video_encrypted));
1242 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1243 if (has_audio) {
1244 ASSERT_TRUE(audio_stream);
1246 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1247 EXPECT_EQ(kCodecVorbis, config.codec());
1248 EXPECT_EQ(32, config.bits_per_channel());
1249 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1250 EXPECT_EQ(44100, config.samples_per_second());
1251 EXPECT_TRUE(config.extra_data());
1252 EXPECT_GT(config.extra_data_size(), 0u);
1253 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1254 EXPECT_EQ(is_audio_encrypted,
1255 audio_stream->audio_decoder_config().is_encrypted());
1256 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1257 ->supports_partial_append_window_trimming());
1258 } else {
1259 EXPECT_FALSE(audio_stream);
1262 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1263 if (has_video) {
1264 EXPECT_TRUE(video_stream);
1265 EXPECT_EQ(is_video_encrypted,
1266 video_stream->video_decoder_config().is_encrypted());
1267 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1268 ->supports_partial_append_window_trimming());
1269 } else {
1270 EXPECT_FALSE(video_stream);
1273 ShutdownDemuxer();
1274 demuxer_.reset();
1278 // TODO(acolwell): Fold this test into Init tests since the tests are
1279 // almost identical.
1280 TEST_F(ChunkDemuxerTest, InitText) {
1281 // Test with 1 video stream, 1 text stream, and 0 or 1 audio streams.
1282 // No encryption cases handled here.
1283 bool has_video = true;
1284 bool is_audio_encrypted = false;
1285 bool is_video_encrypted = false;
1286 for (int i = 0; i < 2; i++) {
1287 bool has_audio = (i & 0x1) != 0;
1289 CreateNewDemuxer();
1291 DemuxerStream* text_stream = NULL;
1292 TextTrackConfig text_config;
1293 EXPECT_CALL(host_, AddTextStream(_, _))
1294 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1295 SaveArg<1>(&text_config)));
1297 int stream_flags = HAS_TEXT;
1298 if (has_audio)
1299 stream_flags |= HAS_AUDIO;
1301 if (has_video)
1302 stream_flags |= HAS_VIDEO;
1304 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1305 stream_flags, is_audio_encrypted, is_video_encrypted));
1306 ASSERT_TRUE(text_stream);
1307 EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1308 EXPECT_EQ(kTextSubtitles, text_config.kind());
1309 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1310 ->supports_partial_append_window_trimming());
1312 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1313 if (has_audio) {
1314 ASSERT_TRUE(audio_stream);
1316 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1317 EXPECT_EQ(kCodecVorbis, config.codec());
1318 EXPECT_EQ(32, config.bits_per_channel());
1319 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1320 EXPECT_EQ(44100, config.samples_per_second());
1321 EXPECT_TRUE(config.extra_data());
1322 EXPECT_GT(config.extra_data_size(), 0u);
1323 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1324 EXPECT_EQ(is_audio_encrypted,
1325 audio_stream->audio_decoder_config().is_encrypted());
1326 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1327 ->supports_partial_append_window_trimming());
1328 } else {
1329 EXPECT_FALSE(audio_stream);
1332 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1333 if (has_video) {
1334 EXPECT_TRUE(video_stream);
1335 EXPECT_EQ(is_video_encrypted,
1336 video_stream->video_decoder_config().is_encrypted());
1337 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1338 ->supports_partial_append_window_trimming());
1339 } else {
1340 EXPECT_FALSE(video_stream);
1343 ShutdownDemuxer();
1344 demuxer_.reset();
1348 TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
1349 // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
1350 // segment in which the text track ID changes. Verify appended buffers before
1351 // and after the second init segment map to the same underlying track buffers.
1352 CreateNewDemuxer();
1353 DemuxerStream* text_stream = NULL;
1354 TextTrackConfig text_config;
1355 EXPECT_CALL(host_, AddTextStream(_, _))
1356 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1357 SaveArg<1>(&text_config)));
1358 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1359 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1360 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1361 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1362 ASSERT_TRUE(audio_stream);
1363 ASSERT_TRUE(video_stream);
1364 ASSERT_TRUE(text_stream);
1366 AppendMuxedCluster(
1367 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1368 MuxedStreamInfo(kVideoTrackNum, "0K 30"),
1369 MuxedStreamInfo(kTextTrackNum, "10K"));
1370 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1372 scoped_ptr<uint8[]> info_tracks;
1373 int info_tracks_size = 0;
1374 CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
1375 false, false,
1376 &info_tracks, &info_tracks_size);
1377 EXPECT_CALL(*this, InitSegmentReceived());
1378 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1379 append_window_start_for_next_append_,
1380 append_window_end_for_next_append_,
1381 &timestamp_offset_map_[kSourceId],
1382 init_segment_received_cb_);
1384 AppendMuxedCluster(
1385 MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1386 MuxedStreamInfo(kVideoTrackNum, "60K"),
1387 MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
1389 CheckExpectedRanges(kSourceId, "{ [0,92) }");
1390 CheckExpectedBuffers(audio_stream, "0K 23K 46K 69K");
1391 CheckExpectedBuffers(video_stream, "0K 30 60K");
1392 CheckExpectedBuffers(text_stream, "10K 45K");
1394 ShutdownDemuxer();
1397 TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
1398 // Tests that non-key-frames following an init segment are allowed
1399 // and dropped, as expected if the initialization segment received
1400 // algorithm correctly sets the needs random access point flag to true for all
1401 // track buffers. Note that the first initialization segment is insufficient
1402 // to fully test this since needs random access point flag initializes to
1403 // true.
1404 CreateNewDemuxer();
1405 DemuxerStream* text_stream = NULL;
1406 EXPECT_CALL(host_, AddTextStream(_, _))
1407 .WillOnce(SaveArg<0>(&text_stream));
1408 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1409 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1410 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1411 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1412 ASSERT_TRUE(audio_stream && video_stream && text_stream);
1414 AppendMuxedCluster(
1415 MuxedStreamInfo(kAudioTrackNum, "23K"),
1416 MuxedStreamInfo(kVideoTrackNum, "0 30K"),
1417 MuxedStreamInfo(kTextTrackNum, "25K 40K"));
1418 CheckExpectedRanges(kSourceId, "{ [23,46) }");
1420 EXPECT_CALL(*this, InitSegmentReceived());
1421 AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
1422 AppendMuxedCluster(
1423 MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1424 MuxedStreamInfo(kVideoTrackNum, "60 90K"),
1425 MuxedStreamInfo(kTextTrackNum, "80K 90K"));
1426 CheckExpectedRanges(kSourceId, "{ [23,92) }");
1428 CheckExpectedBuffers(audio_stream, "23K 46K 69K");
1429 CheckExpectedBuffers(video_stream, "30K 90K");
1430 CheckExpectedBuffers(text_stream, "25K 40K 80K 90K");
1433 // Make sure that the demuxer reports an error if Shutdown()
1434 // is called before all the initialization segments are appended.
1435 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1436 EXPECT_CALL(*this, DemuxerOpened());
1437 demuxer_->Initialize(
1438 &host_, CreateInitDoneCB(
1439 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1441 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1442 EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1444 EXPECT_CALL(*this, InitSegmentReceived());
1445 AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1447 ShutdownDemuxer();
1450 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1451 EXPECT_CALL(*this, DemuxerOpened());
1452 demuxer_->Initialize(
1453 &host_, CreateInitDoneCB(
1454 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1456 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1457 EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1459 EXPECT_CALL(host_, AddTextStream(_, _))
1460 .Times(Exactly(1));
1462 EXPECT_CALL(*this, InitSegmentReceived());
1463 AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1465 ShutdownDemuxer();
1468 // Verifies that all streams waiting for data receive an end of stream
1469 // buffer when Shutdown() is called.
1470 TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1471 DemuxerStream* text_stream = NULL;
1472 EXPECT_CALL(host_, AddTextStream(_, _))
1473 .WillOnce(SaveArg<0>(&text_stream));
1474 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1476 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1477 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1479 bool audio_read_done = false;
1480 bool video_read_done = false;
1481 bool text_read_done = false;
1482 audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1483 video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1484 text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1485 message_loop_.RunUntilIdle();
1487 EXPECT_FALSE(audio_read_done);
1488 EXPECT_FALSE(video_read_done);
1489 EXPECT_FALSE(text_read_done);
1491 ShutdownDemuxer();
1493 EXPECT_TRUE(audio_read_done);
1494 EXPECT_TRUE(video_read_done);
1495 EXPECT_TRUE(text_read_done);
1498 // Test that Seek() completes successfully when the first cluster
1499 // arrives.
1500 TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
1501 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1502 AppendCluster(kDefaultFirstCluster());
1504 InSequence s;
1506 EXPECT_CALL(*this, Checkpoint(1));
1508 Seek(base::TimeDelta::FromMilliseconds(46));
1510 EXPECT_CALL(*this, Checkpoint(2));
1512 Checkpoint(1);
1514 AppendCluster(kDefaultSecondCluster());
1516 message_loop_.RunUntilIdle();
1518 Checkpoint(2);
1521 // Test that parsing errors are handled for clusters appended after init.
1522 TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1523 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1524 AppendCluster(kDefaultFirstCluster());
1526 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1527 AppendGarbage();
1530 // Test the case where a Seek() is requested while the parser
1531 // is in the middle of a cluster. This is to verify that the parser
1532 // does not reset itself on a seek.
1533 TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
1534 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1536 InSequence s;
1538 scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1540 // Split the cluster into two appends at an arbitrary point near the end.
1541 int first_append_size = cluster_a->size() - 11;
1542 int second_append_size = cluster_a->size() - first_append_size;
1544 // Append the first part of the cluster.
1545 AppendData(cluster_a->data(), first_append_size);
1547 ExpectRead(DemuxerStream::AUDIO, 0);
1548 ExpectRead(DemuxerStream::VIDEO, 0);
1549 ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1551 Seek(base::TimeDelta::FromSeconds(5));
1553 // Append the rest of the cluster.
1554 AppendData(cluster_a->data() + first_append_size, second_append_size);
1556 // Append the new cluster and verify that only the blocks
1557 // in the new cluster are returned.
1558 AppendCluster(GenerateCluster(5000, 6));
1559 GenerateExpectedReads(5000, 6);
1562 // Test the case where AppendData() is called before Init().
1563 TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
1564 scoped_ptr<uint8[]> info_tracks;
1565 int info_tracks_size = 0;
1566 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1567 false, false, &info_tracks, &info_tracks_size);
1568 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1569 append_window_start_for_next_append_,
1570 append_window_end_for_next_append_,
1571 &timestamp_offset_map_[kSourceId],
1572 init_segment_received_cb_);
1575 // Make sure Read() callbacks are dispatched with the proper data.
1576 TEST_F(ChunkDemuxerTest, Read) {
1577 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1579 AppendCluster(kDefaultFirstCluster());
1581 bool audio_read_done = false;
1582 bool video_read_done = false;
1583 ReadAudio(base::Bind(&OnReadDone,
1584 base::TimeDelta::FromMilliseconds(0),
1585 &audio_read_done));
1586 ReadVideo(base::Bind(&OnReadDone,
1587 base::TimeDelta::FromMilliseconds(0),
1588 &video_read_done));
1590 EXPECT_TRUE(audio_read_done);
1591 EXPECT_TRUE(video_read_done);
1594 TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
1595 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1596 AppendCluster(kDefaultFirstCluster());
1597 AppendCluster(GenerateCluster(10, 4));
1599 // Make sure that AppendCluster() does not fail with a cluster that
1600 // overlaps with the previously appended cluster.
1601 AppendCluster(GenerateCluster(5, 4));
1603 // Verify that AppendData() can still accept more data.
1604 scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1605 demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1606 append_window_start_for_next_append_,
1607 append_window_end_for_next_append_,
1608 &timestamp_offset_map_[kSourceId],
1609 init_segment_received_cb_);
1612 TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1613 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1614 AppendCluster(kDefaultFirstCluster());
1616 ClusterBuilder cb;
1618 // Test the case where block timecodes are not monotonically
1619 // increasing but stay above the cluster timecode.
1620 cb.SetClusterTimecode(5);
1621 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1622 AddSimpleBlock(&cb, kVideoTrackNum, 10);
1623 AddSimpleBlock(&cb, kAudioTrackNum, 7);
1624 AddSimpleBlock(&cb, kVideoTrackNum, 15);
1626 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1627 AppendCluster(cb.Finish());
1629 // Verify that AppendData() ignores data after the error.
1630 scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1631 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1632 append_window_start_for_next_append_,
1633 append_window_end_for_next_append_,
1634 &timestamp_offset_map_[kSourceId],
1635 init_segment_received_cb_);
1638 TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1639 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1640 AppendCluster(kDefaultFirstCluster());
1642 ClusterBuilder cb;
1644 // Test timecodes going backwards and including values less than the cluster
1645 // timecode.
1646 cb.SetClusterTimecode(5);
1647 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1648 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1649 AddSimpleBlock(&cb, kAudioTrackNum, 3);
1650 AddSimpleBlock(&cb, kVideoTrackNum, 3);
1652 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1653 AppendCluster(cb.Finish());
1655 // Verify that AppendData() ignores data after the error.
1656 scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1657 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1658 append_window_start_for_next_append_,
1659 append_window_end_for_next_append_,
1660 &timestamp_offset_map_[kSourceId],
1661 init_segment_received_cb_);
1665 TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1666 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1667 AppendCluster(kDefaultFirstCluster());
1669 ClusterBuilder cb;
1671 // Test monotonically increasing timestamps on a per-stream
1672 // basis.
1673 cb.SetClusterTimecode(5);
1674 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1675 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1676 AddSimpleBlock(&cb, kAudioTrackNum, 4);
1677 AddSimpleBlock(&cb, kVideoTrackNum, 7);
1679 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1680 AppendCluster(cb.Finish());
1683 // Test the case where a cluster is passed to AppendCluster() before
1684 // INFO & TRACKS data.
1685 TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1686 EXPECT_CALL(*this, DemuxerOpened());
1687 demuxer_->Initialize(
1688 &host_, NewExpectedStatusCB(PIPELINE_ERROR_DECODE), true);
1690 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1692 AppendCluster(GenerateCluster(0, 1));
1696 // Test cases where we get a MarkEndOfStream() call during initialization.
1696 TEST_F(ChunkDemuxerTest, EOSDuringInit) {
1697 EXPECT_CALL(*this, DemuxerOpened());
1698 demuxer_->Initialize(
1699 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1700 MarkEndOfStream(PIPELINE_OK);
1703 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1704 EXPECT_CALL(*this, DemuxerOpened());
1705 demuxer_->Initialize(
1706 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1708 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1710 CheckExpectedRanges("{ }");
1711 MarkEndOfStream(PIPELINE_OK);
1712 ShutdownDemuxer();
1713 CheckExpectedRanges("{ }");
1714 demuxer_->RemoveId(kSourceId);
1715 demuxer_.reset();
1718 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1719 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1721 CheckExpectedRanges("{ }");
1722 MarkEndOfStream(PIPELINE_OK);
1723 CheckExpectedRanges("{ }");
1726 TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1727 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1729 AppendCluster(kDefaultFirstCluster());
1730 CheckExpectedRanges(kDefaultFirstClusterRange);
1732 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1733 MarkEndOfStream(PIPELINE_ERROR_DECODE);
1734 CheckExpectedRanges(kDefaultFirstClusterRange);
1737 TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1738 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1740 AppendCluster(kDefaultFirstCluster());
1741 CheckExpectedRanges(kDefaultFirstClusterRange);
1743 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1744 MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1747 // Helper class to reduce duplicate code when testing end of stream
1748 // Read() behavior.
1749 class EndOfStreamHelper {
1750 public:
1751 explicit EndOfStreamHelper(Demuxer* demuxer)
1752 : demuxer_(demuxer),
1753 audio_read_done_(false),
1754 video_read_done_(false) {
1757 // Request a read on the audio and video streams.
1758 void RequestReads() {
1759 EXPECT_FALSE(audio_read_done_);
1760 EXPECT_FALSE(video_read_done_);
1762 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1763 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1765 audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1766 video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1767 base::MessageLoop::current()->RunUntilIdle();
1770 // Check to see if |audio_read_done_| and |video_read_done_| variables
1771 // match |expected|.
1772 void CheckIfReadDonesWereCalled(bool expected) {
1773 base::MessageLoop::current()->RunUntilIdle();
1774 EXPECT_EQ(expected, audio_read_done_);
1775 EXPECT_EQ(expected, video_read_done_);
1778 private:
1779 static void OnEndOfStreamReadDone(
1780 bool* called,
1781 DemuxerStream::Status status,
1782 const scoped_refptr<DecoderBuffer>& buffer) {
1783 EXPECT_EQ(status, DemuxerStream::kOk);
1784 EXPECT_TRUE(buffer->end_of_stream());
1785 *called = true;
1788 Demuxer* demuxer_;
1789 bool audio_read_done_;
1790 bool video_read_done_;
1792 DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1795 // Make sure that all pending reads for which we don't have media data get an
1796 // "end of stream" buffer when MarkEndOfStream() is called.
1797 TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1798 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1800 AppendCluster(GenerateCluster(0, 2));
1802 bool audio_read_done_1 = false;
1803 bool video_read_done_1 = false;
1804 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1805 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1807 ReadAudio(base::Bind(&OnReadDone,
1808 base::TimeDelta::FromMilliseconds(0),
1809 &audio_read_done_1));
1810 ReadVideo(base::Bind(&OnReadDone,
1811 base::TimeDelta::FromMilliseconds(0),
1812 &video_read_done_1));
1813 message_loop_.RunUntilIdle();
1815 EXPECT_TRUE(audio_read_done_1);
1816 EXPECT_TRUE(video_read_done_1);
1818 end_of_stream_helper_1.RequestReads();
1820 EXPECT_CALL(host_, SetDuration(
1821 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1822 MarkEndOfStream(PIPELINE_OK);
1824 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1826 end_of_stream_helper_2.RequestReads();
1827 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1830 // Make sure that all Read() calls after we get a MarkEndOfStream()
1831 // call return an "end of stream" buffer.
1832 TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1833 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1835 AppendCluster(GenerateCluster(0, 2));
1837 bool audio_read_done_1 = false;
1838 bool video_read_done_1 = false;
1839 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1840 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1841 EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1843 ReadAudio(base::Bind(&OnReadDone,
1844 base::TimeDelta::FromMilliseconds(0),
1845 &audio_read_done_1));
1846 ReadVideo(base::Bind(&OnReadDone,
1847 base::TimeDelta::FromMilliseconds(0),
1848 &video_read_done_1));
1850 end_of_stream_helper_1.RequestReads();
1852 EXPECT_TRUE(audio_read_done_1);
1853 EXPECT_TRUE(video_read_done_1);
1854 end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1856 EXPECT_CALL(host_, SetDuration(
1857 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1858 MarkEndOfStream(PIPELINE_OK);
1860 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1862 // Request a few more reads and make sure we immediately get
1863 // end of stream buffers.
1864 end_of_stream_helper_2.RequestReads();
1865 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1867 end_of_stream_helper_3.RequestReads();
1868 end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1871 TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1872 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1874 AppendCluster(0, 10);
1875 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1876 MarkEndOfStream(PIPELINE_OK);
1878 // Start the first seek.
1879 Seek(base::TimeDelta::FromMilliseconds(20));
1881 // Simulate another seek being requested before the first
1882 // seek has finished prerolling.
1883 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1884 demuxer_->CancelPendingSeek(seek_time2);
1886 // Finish second seek.
1887 Seek(seek_time2);
1889 DemuxerStream::Status status;
1890 base::TimeDelta last_timestamp;
1892 // Make sure audio can reach end of stream.
1893 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1894 ASSERT_EQ(status, DemuxerStream::kOk);
1896 // Make sure video can reach end of stream.
1897 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1898 ASSERT_EQ(status, DemuxerStream::kOk);
1901 // Verify buffered range change behavior for audio/video/text tracks.
1902 TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1903 DemuxerStream* text_stream = NULL;
1905 EXPECT_CALL(host_, AddTextStream(_, _))
1906 .WillOnce(SaveArg<0>(&text_stream));
1907 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1909 AppendMuxedCluster(
1910 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1911 MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
1913 // Check expected ranges and verify that an empty text track does not
1914 // affect the expected ranges.
1915 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1917 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1918 MarkEndOfStream(PIPELINE_OK);
1920 // Check expected ranges and verify that an empty text track does not
1921 // affect the expected ranges.
1922 CheckExpectedRanges(kSourceId, "{ [0,66) }");
1924 // Unmark end of stream state and verify that the ranges return to
1925 // their pre-"end of stream" values.
1926 demuxer_->UnmarkEndOfStream();
1927 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1929 // Add text track data and verify that the buffered ranges don't change
1930 // since the intersection of all the tracks doesn't change.
1931 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1932 AppendMuxedCluster(
1933 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1934 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1935 MuxedStreamInfo(kTextTrackNum, "0K 100K"));
1936 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1938 // Mark end of stream and verify that text track data is reflected in
1939 // the new range.
1940 MarkEndOfStream(PIPELINE_OK);
1941 CheckExpectedRanges(kSourceId, "{ [0,200) }");
1944 // Make sure AppendData() will accept elements that span multiple calls.
1945 TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1946 EXPECT_CALL(*this, DemuxerOpened());
1947 demuxer_->Initialize(
1948 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1950 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1952 scoped_ptr<uint8[]> info_tracks;
1953 int info_tracks_size = 0;
1954 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1955 false, false, &info_tracks, &info_tracks_size);
1957 scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1958 scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1960 size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1961 scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1962 uint8* dst = buffer.get();
1963 memcpy(dst, info_tracks.get(), info_tracks_size);
1964 dst += info_tracks_size;
1966 memcpy(dst, cluster_a->data(), cluster_a->size());
1967 dst += cluster_a->size();
1969 memcpy(dst, cluster_b->data(), cluster_b->size());
1970 dst += cluster_b->size();
1972 EXPECT_CALL(*this, InitSegmentReceived());
1973 AppendDataInPieces(buffer.get(), buffer_size);
1975 GenerateExpectedReads(0, 9);
1978 TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
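  // Each entry below appears to pair an expected video timestamp with an
  // expected audio timestamp (in milliseconds; compare the audio-only and
  // video-only variants of this test further down). kSkip marks a value that
  // is not checked, and a {kSkip, kSkip} entry terminates the list.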
1979 struct BufferTimestamps buffer_timestamps[] = {
1980 {0, 0},
1981 {33, 3},
1982 {67, 6},
1983 {100, 9},
1984 {133, 12},
1985 {kSkip, kSkip},
1988 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1989 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1990 // have the correct duration in the init segment. See http://crbug.com/354284.
1991 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1993 ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1994 base::TimeDelta::FromMilliseconds(2744)));
1997 TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1998 struct BufferTimestamps buffer_timestamps[] = {
1999 {0, 0},
2000 {33, 3},
2001 {67, 6},
2002 {100, 9},
2003 {133, 12},
2004 {kSkip, kSkip},
2007 ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
2008 kInfiniteDuration()));
2010 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2011 EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, audio->liveness());
2012 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2013 EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, video->liveness());
2016 TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
2017 struct BufferTimestamps buffer_timestamps[] = {
2018 {kSkip, 0},
2019 {kSkip, 3},
2020 {kSkip, 6},
2021 {kSkip, 9},
2022 {kSkip, 12},
2023 {kSkip, kSkip},
2026 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
2027 // ParseWebMFile() call's expected duration, below, once the file is fixed to
2028 // have the correct duration in the init segment. See http://crbug.com/354284.
2029 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
2031 ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
2032 base::TimeDelta::FromMilliseconds(2744),
2033 HAS_AUDIO));
2036 TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
2037 struct BufferTimestamps buffer_timestamps[] = {
2038 {0, kSkip},
2039 {33, kSkip},
2040 {67, kSkip},
2041 {100, kSkip},
2042 {133, kSkip},
2043 {kSkip, kSkip},
2046 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
2047 // ParseWebMFile() call's expected duration, below, once the file is fixed to
2048 // have the correct duration in the init segment. See http://crbug.com/354284.
2049 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
2051 ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
2052 base::TimeDelta::FromMilliseconds(2703),
2053 HAS_VIDEO));
2056 TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
2057 struct BufferTimestamps buffer_timestamps[] = {
2058 {0, 0},
2059 {33, 3},
2060 {33, 6},
2061 {67, 9},
2062 {100, 12},
2063 {kSkip, kSkip},
2066 ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
2067 base::TimeDelta::FromMilliseconds(2767)));
2070 // Verify that we output buffers before the entire cluster has been parsed.
2071 TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
2072 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2073 AppendEmptyCluster(0);
2075 scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
2077 bool audio_read_done = false;
2078 bool video_read_done = false;
2079 ReadAudio(base::Bind(&OnReadDone,
2080 base::TimeDelta::FromMilliseconds(0),
2081 &audio_read_done));
2082 ReadVideo(base::Bind(&OnReadDone,
2083 base::TimeDelta::FromMilliseconds(0),
2084 &video_read_done));
2086 // Make sure the reads haven't completed yet.
2087 EXPECT_FALSE(audio_read_done);
2088 EXPECT_FALSE(video_read_done);
2090 // Append data one byte at a time until one or both reads complete.
2091 int i = 0;
2092 for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
2093 AppendData(cluster->data() + i, 1);
2094 message_loop_.RunUntilIdle();
2097 EXPECT_TRUE(audio_read_done || video_read_done);
2098 EXPECT_GT(i, 0);
2099 EXPECT_LT(i, cluster->size());
2101 audio_read_done = false;
2102 video_read_done = false;
2103 ReadAudio(base::Bind(&OnReadDone,
2104 base::TimeDelta::FromMilliseconds(23),
2105 &audio_read_done));
2106 ReadVideo(base::Bind(&OnReadDone,
2107 base::TimeDelta::FromMilliseconds(33),
2108 &video_read_done));
2110 // Make sure the reads haven't completed yet.
2111 EXPECT_FALSE(audio_read_done);
2112 EXPECT_FALSE(video_read_done);
2114 // Append the remaining data.
2115 ASSERT_LT(i, cluster->size());
2116 AppendData(cluster->data() + i, cluster->size() - i);
2118 message_loop_.RunUntilIdle();
2120 EXPECT_TRUE(audio_read_done);
2121 EXPECT_TRUE(video_read_done);
2124 TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
2125 EXPECT_CALL(*this, DemuxerOpened());
2126 demuxer_->Initialize(
2127 &host_, CreateInitDoneCB(
2128 kNoTimestamp(), PIPELINE_ERROR_DECODE), true);
2130 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
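  // A single stray byte is not a valid WebM element, so appending it should
  // trigger a parse error, surfacing as the PIPELINE_ERROR_DECODE expected by
  // the init done callback above.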
2132 uint8 tmp = 0;
2133 demuxer_->AppendData(kSourceId, &tmp, 1,
2134 append_window_start_for_next_append_,
2135 append_window_end_for_next_append_,
2136 &timestamp_offset_map_[kSourceId],
2137 init_segment_received_cb_);
2140 TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
2141 EXPECT_CALL(*this, DemuxerOpened());
2142 demuxer_->Initialize(
2143 &host_, CreateInitDoneCB(kNoTimestamp(),
2144 PIPELINE_ERROR_DECODE), true);
2146 std::vector<std::string> codecs(1);
2147 codecs[0] = "vorbis";
2148 ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
2149 ChunkDemuxer::kOk);
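  // The init segment below contains both audio and video tracks, which does
  // not match the audio-only type declared above, so initialization is
  // expected to fail with PIPELINE_ERROR_DECODE (see the init done callback).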
2151 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2154 TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
2155 EXPECT_CALL(*this, DemuxerOpened());
2156 demuxer_->Initialize(
2157 &host_, CreateInitDoneCB(kNoTimestamp(),
2158 PIPELINE_ERROR_DECODE), true);
2160 std::vector<std::string> codecs(1);
2161 codecs[0] = "vp8";
2162 ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
2163 ChunkDemuxer::kOk);
2165 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2168 TEST_F(ChunkDemuxerTest, MultipleHeaders) {
2169 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2171 AppendCluster(kDefaultFirstCluster());
2173 // Append another identical initialization segment.
2174 EXPECT_CALL(*this, InitSegmentReceived());
2175 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2177 AppendCluster(kDefaultSecondCluster());
2179 GenerateExpectedReads(0, 9);
2182 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
2183 std::string audio_id = "audio1";
2184 std::string video_id = "video1";
2185 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2187 // Append audio and video data into separate source ids.
2188 AppendCluster(audio_id,
2189 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2190 GenerateAudioStreamExpectedReads(0, 4);
2191 AppendCluster(video_id,
2192 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2193 GenerateVideoStreamExpectedReads(0, 4);
2196 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2197 // TODO(matthewjheaney): Here and elsewhere, we need more tests
2198 // for inband text tracks (http://crbug.com/321455).
2200 std::string audio_id = "audio1";
2201 std::string video_id = "video1";
2203 EXPECT_CALL(host_, AddTextStream(_, _))
2204 .Times(Exactly(2));
2205 ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2207 // Append audio and video data into separate source ids.
2208 AppendCluster(audio_id,
2209 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2210 GenerateAudioStreamExpectedReads(0, 4);
2211 AppendCluster(video_id,
2212 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2213 GenerateVideoStreamExpectedReads(0, 4);
2216 TEST_F(ChunkDemuxerTest, AddIdFailures) {
2217 EXPECT_CALL(*this, DemuxerOpened());
2218 demuxer_->Initialize(
2219 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2221 std::string audio_id = "audio1";
2222 std::string video_id = "video1";
2224 ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2226 // Adding an id with audio/video should fail because we already added audio.
2227 ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2229 EXPECT_CALL(*this, InitSegmentReceived());
2230 AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2232 // Adding an id after append should fail.
2233 ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2236 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2237 TEST_F(ChunkDemuxerTest, RemoveId) {
2238 std::string audio_id = "audio1";
2239 std::string video_id = "video1";
2240 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2242 // Append audio and video data into separate source ids.
2243 AppendCluster(audio_id,
2244 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2245 AppendCluster(video_id,
2246 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2248 // Read() from audio should return normal buffers.
2249 GenerateAudioStreamExpectedReads(0, 4);
2251 // Remove the audio id.
2252 demuxer_->RemoveId(audio_id);
2254 // Read() from audio should return "end of stream" buffers.
2255 bool audio_read_done = false;
2256 ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2257 message_loop_.RunUntilIdle();
2258 EXPECT_TRUE(audio_read_done);
2260 // Read() from video should still return normal buffers.
2261 GenerateVideoStreamExpectedReads(0, 4);
2264 // Test that removing an ID immediately after adding it does not interfere with
2265 // quota for new IDs in the future.
2266 TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
2267 std::string audio_id_1 = "audio1";
2268 ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
2269 demuxer_->RemoveId(audio_id_1);
2271 std::string audio_id_2 = "audio2";
2272 ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
2275 TEST_F(ChunkDemuxerTest, SeekCanceled) {
2276 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2278 // Append cluster at the beginning of the stream.
2279 AppendCluster(GenerateCluster(0, 4));
2281 // Seek to an unbuffered region.
2282 Seek(base::TimeDelta::FromSeconds(50));
2284 // Attempt to read in unbuffered area; should not fulfill the read.
2285 bool audio_read_done = false;
2286 bool video_read_done = false;
2287 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2288 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2289 EXPECT_FALSE(audio_read_done);
2290 EXPECT_FALSE(video_read_done);
2292 // Now cancel the pending seek, which should flush the reads with empty
2293 // buffers.
2294 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2295 demuxer_->CancelPendingSeek(seek_time);
2296 message_loop_.RunUntilIdle();
2297 EXPECT_TRUE(audio_read_done);
2298 EXPECT_TRUE(video_read_done);
2300 // A seek back to the buffered region should succeed.
2301 Seek(seek_time);
2302 GenerateExpectedReads(0, 4);
2305 TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2306 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2308 // Append cluster at the beginning of the stream.
2309 AppendCluster(GenerateCluster(0, 4));
2311 // Start waiting for a seek.
2312 base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2313 base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2314 demuxer_->StartWaitingForSeek(seek_time1);
2316 // Now cancel the upcoming seek to an unbuffered region.
2317 demuxer_->CancelPendingSeek(seek_time2);
2318 demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2320 // Read requests should be fulfilled with empty buffers.
2321 bool audio_read_done = false;
2322 bool video_read_done = false;
2323 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2324 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2325 EXPECT_TRUE(audio_read_done);
2326 EXPECT_TRUE(video_read_done);
2328 // A seek back to the buffered region should succeed.
2329 Seek(seek_time2);
2330 GenerateExpectedReads(0, 4);
2333 // Test that Seek() successfully seeks to all source IDs.
2334 TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2335 std::string audio_id = "audio1";
2336 std::string video_id = "video1";
2337 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2339 AppendCluster(
2340 audio_id,
2341 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2342 AppendCluster(
2343 video_id,
2344 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2346 // Read() should return buffers at 0.
2347 bool audio_read_done = false;
2348 bool video_read_done = false;
2349 ReadAudio(base::Bind(&OnReadDone,
2350 base::TimeDelta::FromMilliseconds(0),
2351 &audio_read_done));
2352 ReadVideo(base::Bind(&OnReadDone,
2353 base::TimeDelta::FromMilliseconds(0),
2354 &video_read_done));
2355 EXPECT_TRUE(audio_read_done);
2356 EXPECT_TRUE(video_read_done);
2358 // Seek to 3 (an unbuffered region).
2359 Seek(base::TimeDelta::FromSeconds(3));
2361 audio_read_done = false;
2362 video_read_done = false;
2363 ReadAudio(base::Bind(&OnReadDone,
2364 base::TimeDelta::FromSeconds(3),
2365 &audio_read_done));
2366 ReadVideo(base::Bind(&OnReadDone,
2367 base::TimeDelta::FromSeconds(3),
2368 &video_read_done));
2369 // Read()s should not return until after data is appended at the Seek point.
2370 EXPECT_FALSE(audio_read_done);
2371 EXPECT_FALSE(video_read_done);
2373 AppendCluster(audio_id,
2374 GenerateSingleStreamCluster(
2375 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2376 AppendCluster(video_id,
2377 GenerateSingleStreamCluster(
2378 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2380 message_loop_.RunUntilIdle();
2382 // Read() should return buffers at 3.
2383 EXPECT_TRUE(audio_read_done);
2384 EXPECT_TRUE(video_read_done);
2387 // Test that Seek() completes successfully when EndOfStream
2388 // is called before data is available for that seek point.
2389 // This scenario can occur when seeking past the end of stream
2390 // of either audio or video (or both).
2391 TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2392 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2394 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2395 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2397 // Seeking past the end of video.
2398 // Note: audio data is available for that seek point.
2399 bool seek_cb_was_called = false;
2400 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2401 demuxer_->StartWaitingForSeek(seek_time);
2402 demuxer_->Seek(seek_time,
2403 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2404 message_loop_.RunUntilIdle();
2406 EXPECT_FALSE(seek_cb_was_called);
2408 EXPECT_CALL(host_, SetDuration(
2409 base::TimeDelta::FromMilliseconds(120)));
2410 MarkEndOfStream(PIPELINE_OK);
2411 message_loop_.RunUntilIdle();
2413 EXPECT_TRUE(seek_cb_was_called);
2415 ShutdownDemuxer();
2418 // Test that EndOfStream is ignored if coming during a pending seek
2419 // whose seek time is before some existing ranges.
2420 TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2421 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2423 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2424 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2425 AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2426 AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2428 bool seek_cb_was_called = false;
2429 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2430 demuxer_->StartWaitingForSeek(seek_time);
2431 demuxer_->Seek(seek_time,
2432 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2433 message_loop_.RunUntilIdle();
2435 EXPECT_FALSE(seek_cb_was_called);
2437 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2438 MarkEndOfStream(PIPELINE_OK);
2439 message_loop_.RunUntilIdle();
2441 EXPECT_FALSE(seek_cb_was_called);
2443 demuxer_->UnmarkEndOfStream();
2445 AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2446 AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2448 message_loop_.RunUntilIdle();
2450 EXPECT_TRUE(seek_cb_was_called);
2452 ShutdownDemuxer();
2455 // Test ranges in an audio-only stream.
2456 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2457 EXPECT_CALL(*this, DemuxerOpened());
2458 demuxer_->Initialize(
2459 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2461 ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2462 EXPECT_CALL(*this, InitSegmentReceived());
2463 AppendInitSegment(HAS_AUDIO);
2465 // Test a simple cluster.
2466 AppendCluster(
2467 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2469 CheckExpectedRanges("{ [0,92) }");
2471 // Append a disjoint cluster to check for two separate ranges.
2472 AppendCluster(GenerateSingleStreamCluster(
2473 150, 219, kAudioTrackNum, kAudioBlockDuration));
2475 CheckExpectedRanges("{ [0,92) [150,219) }");
2478 // Test ranges in a video-only stream.
2479 TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2480 EXPECT_CALL(*this, DemuxerOpened());
2481 demuxer_->Initialize(
2482 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2484 ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2485 EXPECT_CALL(*this, InitSegmentReceived());
2486 AppendInitSegment(HAS_VIDEO);
2488 // Test a simple cluster.
2489 AppendCluster(
2490 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2492 CheckExpectedRanges("{ [0,132) }");
2494 // Append a disjoint cluster to check for two separate ranges.
2495 AppendCluster(GenerateSingleStreamCluster(
2496 200, 299, kVideoTrackNum, kVideoBlockDuration));
2498 CheckExpectedRanges("{ [0,132) [200,299) }");
2501 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2502 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2504 // Audio: 0 -> 23
2505 // Video: 0 -> 33
2506 // Buffered Range: 0 -> 23
2507 // Audio block duration is smaller than video block duration,
2508 // so the buffered ranges should correspond to the audio blocks.
2509 AppendCluster(GenerateSingleStreamCluster(
2510 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2511 AppendCluster(GenerateSingleStreamCluster(
2512 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2514 CheckExpectedRanges("{ [0,23) }");
2516 // Audio: 300 -> 400
2517 // Video: 320 -> 420
2518 // Buffered Range: 320 -> 400 (end overlap)
2519 AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2520 AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2522 CheckExpectedRanges("{ [0,23) [320,400) }");
2524 // Audio: 520 -> 590
2525 // Video: 500 -> 570
2526 // Buffered Range: 520 -> 570 (front overlap)
2527 AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2528 AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2530 CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2532 // Audio: 720 -> 750
2533 // Video: 700 -> 770
2534 // Buffered Range: 720 -> 750 (complete overlap, audio)
2535 AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2536 AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2538 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2540 // Audio: 900 -> 970
2541 // Video: 920 -> 950
2542 // Buffered Range: 920 -> 950 (complete overlap, video)
2543 AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2544 AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2546 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2548 // Appending within buffered range should not affect buffered ranges.
2549 AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2550 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2552 // Appending to single stream outside buffered ranges should not affect
2553 // buffered ranges.
2554 AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2555 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2558 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2559 EXPECT_CALL(host_, AddTextStream(_, _));
2560 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2562 // Append audio & video data.
2563 AppendMuxedCluster(
2564 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2565 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2567 // Verify that a text track with no cues does not result in an empty buffered
2568 // range.
2569 CheckExpectedRanges("{ [0,46) }");
2571 // Add some text cues.
2572 AppendMuxedCluster(
2573 MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
2574 MuxedStreamInfo(kVideoTrackNum, "100K 133"),
2575 MuxedStreamInfo(kTextTrackNum, "100K 200K"));
2577 // Verify that the text cues are not reflected in the buffered ranges.
2578 CheckExpectedRanges("{ [0,46) [100,146) }");
2580 // Remove the buffered ranges.
2581 demuxer_->Remove(kSourceId, base::TimeDelta(),
2582 base::TimeDelta::FromMilliseconds(250));
2583 CheckExpectedRanges("{ }");
2586 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2587 // over-hanging tails at the end of the ranges as this is likely due to block
2588 // duration differences.
2589 TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2590 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2592 AppendMuxedCluster(
2593 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2594 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2596 CheckExpectedRanges("{ [0,46) }");
2598 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2599 MarkEndOfStream(PIPELINE_OK);
2601 // Verify that the range extends to the end of the video data.
2602 CheckExpectedRanges("{ [0,66) }");
2604 // Verify that the range reverts to the intersection when end of stream
2605 // has been cancelled.
2606 demuxer_->UnmarkEndOfStream();
2607 CheckExpectedRanges("{ [0,46) }");
2609 // Append and remove data so that the 2 streams' end ranges do not overlap.
2611 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2612 AppendMuxedCluster(
2613 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2614 MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
2616 // At this point, the per-stream ranges are as follows:
2617 // Audio: [0,46) [200,246)
2618 // Video: [0,66) [200,398)
2619 CheckExpectedRanges("{ [0,46) [200,246) }");
2621 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2622 base::TimeDelta::FromMilliseconds(300));
2624 // At this point, the per-stream ranges are as follows:
2625 // Audio: [0,46)
2626 // Video: [0,66) [332,398)
2627 CheckExpectedRanges("{ [0,46) }");
2629 AppendMuxedCluster(
2630 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2631 MuxedStreamInfo(kVideoTrackNum, "200K 233"));
2633 // At this point, the per-stream ranges are as follows:
2634 // Audio: [0,46) [200,246)
2635 // Video: [0,66) [200,266) [332,398)
2636 // NOTE: The last ranges on each stream do not overlap in time.
2637 CheckExpectedRanges("{ [0,46) [200,246) }");
2639 MarkEndOfStream(PIPELINE_OK);
2641 // NOTE: The last range on each stream gets extended to the highest
2642 // end timestamp according to the spec. The last audio range gets extended
2643 // from [200,246) to [200,398) which is why the intersection results in the
2644 // middle range getting larger AND the new range appearing.
2645 CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
2648 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2649 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2651 // Create a cluster where the video timecode begins 25ms after the audio.
2652 AppendCluster(GenerateCluster(0, 25, 8));
2654 Seek(base::TimeDelta::FromSeconds(0));
2655 GenerateExpectedReads(0, 25, 8);
2657 // Seek to 5 seconds.
2658 Seek(base::TimeDelta::FromSeconds(5));
2660 // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2661 // after the video.
2662 AppendCluster(GenerateCluster(5025, 5000, 8));
2663 GenerateExpectedReads(5025, 5000, 8);
2666 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2667 std::string audio_id = "audio1";
2668 std::string video_id = "video1";
2669 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2671 // Generate two streams where the video stream starts 5ms after the audio
2672 // stream and append them.
2673 AppendCluster(audio_id, GenerateSingleStreamCluster(
2674 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2675 AppendCluster(video_id, GenerateSingleStreamCluster(
2676 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2678 // Both streams should be able to fulfill a seek to 25.
2679 Seek(base::TimeDelta::FromMilliseconds(25));
2680 GenerateAudioStreamExpectedReads(25, 4);
2681 GenerateVideoStreamExpectedReads(30, 4);
2684 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2685 std::string audio_id = "audio1";
2686 std::string video_id = "video1";
2687 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2689 // Generate two streams where the video stream starts 10s after the audio
2690 // stream and append them.
2691 AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2692 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2693 AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2694 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2696 // Should not be able to fulfill a seek to 0.
2697 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2698 demuxer_->StartWaitingForSeek(seek_time);
2699 demuxer_->Seek(seek_time,
2700 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2701 ExpectRead(DemuxerStream::AUDIO, 0);
2702 ExpectEndOfStream(DemuxerStream::VIDEO);
2705 TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2706 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2708 // Generate and append an empty cluster beginning at 0.
2709 AppendEmptyCluster(0);
2711 // Sanity check that data can be appended after this cluster correctly.
2712 AppendCluster(GenerateCluster(0, 2));
2713 ExpectRead(DemuxerStream::AUDIO, 0);
2714 ExpectRead(DemuxerStream::VIDEO, 0);
2717 TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
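  // "avc1.*" is an MP4/H.264 codec ID, so AddId() is only expected to succeed
  // in builds with proprietary codecs enabled.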
2718 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2720 #if defined(USE_PROPRIETARY_CODECS)
2721 expected = ChunkDemuxer::kOk;
2722 #endif
2724 std::vector<std::string> codecs;
2725 codecs.push_back("avc1.4D4041");
2727 EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2730 // Test codec IDs that are not compliant with RFC 6381, but have been
2731 // seen in the wild.
2732 TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2733 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2735 #if defined(USE_PROPRIETARY_CODECS)
2736 expected = ChunkDemuxer::kOk;
2737 #endif
2738 const char* codec_ids[] = {
2739 // GPAC places leading zeros on the audio object type.
2740 "mp4a.40.02",
2741 "mp4a.40.05"
2744 for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2745 std::vector<std::string> codecs;
2746 codecs.push_back(codec_ids[i]);
2748 ChunkDemuxer::Status result =
2749 demuxer_->AddId("source_id", "audio/mp4", codecs);
2751 EXPECT_EQ(result, expected)
2752 << "Fail to add codec_id '" << codec_ids[i] << "'";
2754 if (result == ChunkDemuxer::kOk)
2755 demuxer_->RemoveId("source_id");
2759 TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2760 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2762 EXPECT_CALL(host_, SetDuration(_))
2763 .Times(AnyNumber());
2765 base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2766 base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2768 AppendCluster(kDefaultFirstCluster());
2769 AppendCluster(kDefaultSecondCluster());
2770 MarkEndOfStream(PIPELINE_OK);
2772 DemuxerStream::Status status;
2773 base::TimeDelta last_timestamp;
2775 // Verify that we can read audio & video to the end w/o problems.
2776 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2777 EXPECT_EQ(DemuxerStream::kOk, status);
2778 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2780 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2781 EXPECT_EQ(DemuxerStream::kOk, status);
2782 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2784 // Seek back to 0 and verify that we can read to the end again.
2785 Seek(base::TimeDelta::FromMilliseconds(0));
2787 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2788 EXPECT_EQ(DemuxerStream::kOk, status);
2789 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2791 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2792 EXPECT_EQ(DemuxerStream::kOk, status);
2793 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2796 TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2797 EXPECT_CALL(*this, DemuxerOpened());
2798 demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2799 ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2800 ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2802 CheckExpectedRanges("audio", "{ }");
2803 CheckExpectedRanges("video", "{ }");
2806 // Test that Seek() completes successfully when the first cluster
2807 // arrives.
2808 TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2809 InSequence s;
2811 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2813 AppendCluster(kDefaultFirstCluster());
2815 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2816 demuxer_->StartWaitingForSeek(seek_time);
2818 AppendCluster(kDefaultSecondCluster());
2819 EXPECT_CALL(host_, SetDuration(
2820 base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2821 MarkEndOfStream(PIPELINE_OK);
2823 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2825 GenerateExpectedReads(0, 4);
2826 GenerateExpectedReads(46, 66, 5);
2828 EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2829 end_of_stream_helper.RequestReads();
2830 end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2833 TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2834 InSequence s;
2836 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2838 DemuxerStream::Status status;
2839 base::TimeDelta last_timestamp;
2841 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2843 // Fetch initial video config and verify it matches what we expect.
2844 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2845 ASSERT_TRUE(video_config_1.IsValidConfig());
2846 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2847 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2849 ExpectRead(DemuxerStream::VIDEO, 0);
2851 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2853 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2854 EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2856 // Fetch the new decoder config.
2857 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2858 ASSERT_TRUE(video_config_2.IsValidConfig());
2859 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2860 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2862 ExpectRead(DemuxerStream::VIDEO, 527);
2864 // Read until the next config change.
2865 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2866 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2867 EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2869 // Get the new config and verify that it matches the first one.
2870 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2872 ExpectRead(DemuxerStream::VIDEO, 801);
2874 // Read until the end of the stream just to make sure there aren't any other
2875 // config changes.
2876 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2877 ASSERT_EQ(status, DemuxerStream::kOk);
2880 TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2881 InSequence s;
2883 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2885 DemuxerStream::Status status;
2886 base::TimeDelta last_timestamp;
2888 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2890 // Fetch initial audio config and verify it matches what we expect.
2891 const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2892 ASSERT_TRUE(audio_config_1.IsValidConfig());
2893 EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2894 EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2896 ExpectRead(DemuxerStream::AUDIO, 0);
2898 // The first config change seen is from a splice frame representing an overlap
2899 // of buffers from config 1 by buffers from config 2.
2900 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2901 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2902 EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2904 // Fetch the new decoder config.
2905 const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2906 ASSERT_TRUE(audio_config_2.IsValidConfig());
2907 EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2908 EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2910 // The next config change is from a splice frame representing an overlap of
2911 // buffers from config 2 by buffers from config 1.
2912 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2913 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2914 EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2915 ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2917 // Read until the end of the stream just to make sure there aren't any other
2918 // config changes.
2919 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2920 ASSERT_EQ(status, DemuxerStream::kOk);
2921 EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2924 TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2925 InSequence s;
2927 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2929 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2931 // Fetch initial video config and verify it matches what we expect.
2932 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2933 ASSERT_TRUE(video_config_1.IsValidConfig());
2934 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2935 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2937 ExpectRead(DemuxerStream::VIDEO, 0);
2939 // Seek to a location with a different config.
2940 Seek(base::TimeDelta::FromMilliseconds(527));
2942 // Verify that the config change is signaled.
2943 ExpectConfigChanged(DemuxerStream::VIDEO);
2945 // Fetch the new decoder config and verify it is what we expect.
2946 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2947 ASSERT_TRUE(video_config_2.IsValidConfig());
2948 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2949 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2951 // Verify that Read() will return a buffer now.
2952 ExpectRead(DemuxerStream::VIDEO, 527);
2954 // Seek back to the beginning and verify we get another config change.
2955 Seek(base::TimeDelta::FromMilliseconds(0));
2956 ExpectConfigChanged(DemuxerStream::VIDEO);
2957 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2958 ExpectRead(DemuxerStream::VIDEO, 0);
2960 // Seek to a location that requires a config change and then
2961 // seek to a new location that has the same configuration as
2962 // the start of the file without a Read() in the middle.
2963 Seek(base::TimeDelta::FromMilliseconds(527));
2964 Seek(base::TimeDelta::FromMilliseconds(801));
2966 // Verify that no config change is signaled.
2967 ExpectRead(DemuxerStream::VIDEO, 801);
2968 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2971 TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2972 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2974 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
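  // With a +30 second offset, data appended at media time 0 should be
  // buffered starting at 30 seconds, which is where the seek and reads below
  // expect to find it.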
2975 AppendCluster(GenerateCluster(0, 2));
2977 Seek(base::TimeDelta::FromMilliseconds(30000));
2979 GenerateExpectedReads(30000, 2);
2982 TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2983 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2985 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
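  // With a -1 second offset, the cluster appended at 1000ms should be
  // buffered starting at 0ms, so the expected reads below start at 0.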
2986 AppendCluster(GenerateCluster(1000, 2));
2988 GenerateExpectedReads(0, 2);
2991 TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2992 std::string audio_id = "audio1";
2993 std::string video_id = "video1";
2994 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2996 ASSERT_TRUE(SetTimestampOffset(
2997 audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2998 ASSERT_TRUE(SetTimestampOffset(
2999 video_id, base::TimeDelta::FromMilliseconds(-2500)));
3000 AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
3001 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
3002 AppendCluster(video_id, GenerateSingleStreamCluster(2500,
3003 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
3004 GenerateAudioStreamExpectedReads(0, 4);
3005 GenerateVideoStreamExpectedReads(0, 4);
3007 Seek(base::TimeDelta::FromMilliseconds(27300));
3009 ASSERT_TRUE(SetTimestampOffset(
3010 audio_id, base::TimeDelta::FromMilliseconds(27300)));
3011 ASSERT_TRUE(SetTimestampOffset(
3012 video_id, base::TimeDelta::FromMilliseconds(27300)));
3013 AppendCluster(audio_id, GenerateSingleStreamCluster(
3014 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
3015 AppendCluster(video_id, GenerateSingleStreamCluster(
3016 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
3017 GenerateVideoStreamExpectedReads(27300, 4);
3018 GenerateAudioStreamExpectedReads(27300, 4);
3021 TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
3022 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3024 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
3025 // Append only part of the cluster data.
3026 AppendData(cluster->data(), cluster->size() - 13);
3028 // Confirm we're in the middle of parsing a media segment.
3029 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3031 demuxer_->Abort(kSourceId,
3032 append_window_start_for_next_append_,
3033 append_window_end_for_next_append_,
3034 &timestamp_offset_map_[kSourceId]);
3036 // After Abort(), parsing should no longer be in the middle of a media
3037 // segment.
3038 ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
3041 #if defined(USE_PROPRIETARY_CODECS)
3042 #if defined(ENABLE_MPEG2TS_STREAM_PARSER)
3043 TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
3044 EXPECT_CALL(*this, DemuxerOpened());
3045 demuxer_->Initialize(
3046 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3047 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3049 // For info:
3050 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3051 // Video: first PES:
3052 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
3053 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
3054 // Audio: first PES:
3055 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
3056 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
3057 // Video: last PES:
3058 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
3059 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
3060 // Audio: last PES:
3061 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
3063 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3064 EXPECT_CALL(*this, InitSegmentReceived());
3065 AppendData(kSourceId, buffer->data(), buffer->data_size());
3067 // Confirm we're in the middle of parsing a media segment.
3068 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3070 // Aborting the Mpeg2 TS parser triggers the emission of the last video
3071 // buffer that is pending in the stream parser.
3072 Ranges<base::TimeDelta> range_before_abort =
3073 demuxer_->GetBufferedRanges(kSourceId);
3074 demuxer_->Abort(kSourceId,
3075 append_window_start_for_next_append_,
3076 append_window_end_for_next_append_,
3077 &timestamp_offset_map_[kSourceId]);
3078 Ranges<base::TimeDelta> range_after_abort =
3079 demuxer_->GetBufferedRanges(kSourceId);
3081 ASSERT_EQ(range_before_abort.size(), 1u);
3082 ASSERT_EQ(range_after_abort.size(), 1u);
3083 EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
3084 EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
3087 TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
3088 EXPECT_CALL(*this, DemuxerOpened());
3089 demuxer_->Initialize(
3090 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3091 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3093 // For info:
3094 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3095 // Video: first PES:
3096 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
3097 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
3098 // Audio: first PES:
3099 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
3100 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
3101 // Video: last PES:
3102 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
3103 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
3104 // Audio: last PES:
3105 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
3107 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3108 EXPECT_CALL(*this, InitSegmentReceived());
3109 AppendData(kSourceId, buffer->data(), buffer->data_size());
3111 // Confirm we're in the middle of parsing a media segment.
3112 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3114 // Seek to a time corresponding to buffers that will be emitted during the
3115 // abort.
3116 Seek(base::TimeDelta::FromMilliseconds(4110));
3118 // Aborting the Mpeg2 TS parser triggers the emission of the last video
3119 // buffer that is pending in the stream parser.
3120 demuxer_->Abort(kSourceId,
3121 append_window_start_for_next_append_,
3122 append_window_end_for_next_append_,
3123 &timestamp_offset_map_[kSourceId]);
3126 #endif
3127 #endif
3129 TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
3130 const uint8 kBuffer[] = {
3131 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
3132 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
3134 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 3 due to:)
3135 0xE7, 0x81, 0x02, // Cluster TIMECODE (value = 2)
3136 /* e.g. put some blocks here... */
3137 0x1A, 0x45, 0xDF, 0xA3, 0x8A, // EBMLHEADER (size = 10, not fully appended)
3140 // This array indicates expected return value of IsParsingMediaSegment()
3141 // following each incrementally appended byte in |kBuffer|.
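  // Roughly: the known-size cluster stops counting as "parsing a media
  // segment" once its last byte has been appended, while the unknown-size
  // cluster stays in that state until the header of the next top-level
  // element (the EBMLHEADER here) has been appended.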
3142 const bool kExpectedReturnValues[] = {
3143 false, false, false, false, true,
3144 true, true, false,
3146 false, false, false, false, true,
3147 true, true, true,
3149 true, true, true, true, false,
3152 static_assert(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
3153 "test arrays out of sync");
3154 static_assert(arraysize(kBuffer) == sizeof(kBuffer),
3155 "there should be one byte per index");
3157 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3159 for (size_t i = 0; i < sizeof(kBuffer); i++) {
3160 DVLOG(3) << "Appending and testing index " << i;
3161 AppendData(kBuffer + i, 1);
3162 bool expected_return_value = kExpectedReturnValues[i];
3163 EXPECT_EQ(expected_return_value,
3164 demuxer_->IsParsingMediaSegment(kSourceId));
3168 TEST_F(ChunkDemuxerTest, DurationChange) {
3169 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3170 const int kStreamDuration = kDefaultDuration().InMilliseconds();
3172 // Add data leading up to the currently set duration.
3173 AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
3174 kStreamDuration - kVideoBlockDuration,
3175 2));
3177 CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
3179 // Add data beginning at the currently set duration and expect a new duration
3180 // to be signaled. Note that the last video block will have a higher end
3181 // timestamp than the last audio block.
3182 const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
3183 EXPECT_CALL(host_, SetDuration(
3184 base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
3185 AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
3187 CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
3189 // Add more data to the end of each media type. Note that the last audio block
3190 // will have a higher end timestamp than the last video block.
3191 const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
3192 EXPECT_CALL(host_, SetDuration(
3193 base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
3194 AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
3195 kStreamDuration + kVideoBlockDuration,
3196 3));
3198 // See that the range has increased appropriately (but not to the full
3199 // duration of 201293, since there is not enough video appended for that).
3200 CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
3203 TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
3204 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3205 ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
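  // Appending at an offset equal to the current duration should extend the
  // duration by the highest appended end timestamp, which here is two video
  // block durations past the offset.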
3206 EXPECT_CALL(host_, SetDuration(
3207 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
3208 kVideoBlockDuration * 2)));
3209 AppendCluster(GenerateCluster(0, 4));
3212 TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
3213 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3215 AppendCluster(kDefaultFirstCluster());
3217 EXPECT_CALL(host_, SetDuration(
3218 base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
3219 MarkEndOfStream(PIPELINE_OK);
3223 TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
3224 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
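  // A zero-length append should be a no-op and must not crash.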
3225 AppendData(NULL, 0);
3228 TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
3229 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3231 EXPECT_CALL(host_, SetDuration(_))
3232 .Times(AnyNumber());
3234 AppendCluster(kDefaultFirstCluster());
3235 MarkEndOfStream(PIPELINE_OK);
3237 demuxer_->UnmarkEndOfStream();
3239 AppendCluster(kDefaultSecondCluster());
3240 MarkEndOfStream(PIPELINE_OK);
3243 // Test receiving a Shutdown() call before we get an Initialize()
3244 // call. This can happen if the video element gets destroyed before
3245 // the pipeline has a chance to initialize the demuxer.
3246 TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3247 demuxer_->Shutdown();
3248 demuxer_->Initialize(
3249 &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3250 message_loop_.RunUntilIdle();
3253 // Verifies that signaling end of stream while stalled at a gap
3254 // boundary does not trigger end of stream buffers to be returned.
3255 TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
3256 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3258 AppendCluster(0, 10);
3259 AppendCluster(300, 10);
3260 CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
3262 GenerateExpectedReads(0, 10);
3264 bool audio_read_done = false;
3265 bool video_read_done = false;
3266 ReadAudio(base::Bind(&OnReadDone,
3267 base::TimeDelta::FromMilliseconds(138),
3268 &audio_read_done));
3269 ReadVideo(base::Bind(&OnReadDone,
3270 base::TimeDelta::FromMilliseconds(138),
3271 &video_read_done));
3273 // Verify that the reads didn't complete.
3274 EXPECT_FALSE(audio_read_done);
3275 EXPECT_FALSE(video_read_done);
3277 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
3278 MarkEndOfStream(PIPELINE_OK);
3280 // Verify that the reads still haven't completed.
3281 EXPECT_FALSE(audio_read_done);
3282 EXPECT_FALSE(video_read_done);
3284 demuxer_->UnmarkEndOfStream();
3286 AppendCluster(138, 22);
3288 message_loop_.RunUntilIdle();
3290 CheckExpectedRanges(kSourceId, "{ [0,435) }");
3292 // Verify that the reads have completed.
3293 EXPECT_TRUE(audio_read_done);
3294 EXPECT_TRUE(video_read_done);
3296 // Read the rest of the buffers.
3297 GenerateExpectedReads(161, 171, 20);
3299 // Verify that reads block because the append cleared the end of stream state.
3300 audio_read_done = false;
3301 video_read_done = false;
3302 ReadAudio(base::Bind(&OnReadDone_EOSExpected,
3303 &audio_read_done));
3304 ReadVideo(base::Bind(&OnReadDone_EOSExpected,
3305 &video_read_done));
3307 // Verify that the reads don't complete.
3308 EXPECT_FALSE(audio_read_done);
3309 EXPECT_FALSE(video_read_done);
3311 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
3312 MarkEndOfStream(PIPELINE_OK);
3314 EXPECT_TRUE(audio_read_done);
3315 EXPECT_TRUE(video_read_done);
TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Cancel preroll.
  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
  demuxer_->CancelPendingSeek(seek_time);

  // Initiate the seek to the new location.
  Seek(seek_time);

  // Append data to satisfy the seek.
  AppendCluster(seek_time.InMilliseconds(), 10);
}
TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Set different memory limits for audio and video.
  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
  demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize + 1);

  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1000);

  // Append data at the start that can be garbage collected:
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 5);

  // We should be right at the buffer limit, so this should pass.
  EXPECT_TRUE(demuxer_->EvictCodedFrames(
      kSourceId, base::TimeDelta::FromMilliseconds(0), 0));

  CheckExpectedRanges(DemuxerStream::AUDIO, "{ [0,230) }");
  CheckExpectedRanges(DemuxerStream::VIDEO, "{ [0,165) }");

  // Seek so we can garbage collect the data appended above.
  Seek(seek_time);

  // Append data at seek_time.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time.InMilliseconds(), 10);
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            seek_time.InMilliseconds(), 5);

  // The eviction should delete the first append, leaving us exactly at the
  // buffer limit.
  EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 0));

  // Verify that the old data, and nothing more, has been garbage collected.
  CheckExpectedRanges(DemuxerStream::AUDIO, "{ [1000,1230) }");
  CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
}
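// Size recap (kBlockSize is 10 bytes): the ten audio blocks occupy exactly
// the 100-byte audio limit and, at 23 ms each, cover [0,230); the five video
// blocks occupy 50 bytes against the 51-byte video limit and, at 33 ms each,
// cover [0,165). That is why the first eviction is a no-op and the second
// removes exactly the earlier append.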
TEST_F(ChunkDemuxerTest, GCDuringSeek) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);

  base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);

  // Initiate a seek to |seek_time1|.
  Seek(seek_time1);

  // Append data to satisfy the first seek request.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time1.InMilliseconds(), 5);
  CheckExpectedRanges(kSourceId, "{ [1000,1115) }");

  // We are under the memory limit, so Evict should be a no-op.
  EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time1, 0));
  CheckExpectedRanges(kSourceId, "{ [1000,1115) }");

  // Signal that the second seek is starting.
  demuxer_->StartWaitingForSeek(seek_time2);

  // Append data to satisfy the second seek.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time2.InMilliseconds(), 5);
  CheckExpectedRanges(kSourceId, "{ [500,615) [1000,1115) }");

  // We are now over our memory usage limit. We have just seeked to
  // |seek_time2|, so the data around the 500ms position should be preserved,
  // while the previous append at 1000ms should be removed.
  EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
  CheckExpectedRanges(kSourceId, "{ [500,615) }");

  // Complete the seek.
  demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));

  // Append more data and make sure that we preserve both the buffered range
  // around |seek_time2|, because that's the current playback position,
  // and the newly appended range, since this is the most recent append.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
  EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
  CheckExpectedRanges(kSourceId, "{ [500,615) [700,815) }");
}
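// Arithmetic recap: the limit is 5 * kBlockSize = 50 bytes, and each
// five-block append adds 50 bytes spanning 5 * 23 = 115 ms of audio, which is
// why every appended range above is 115 ms wide (e.g. [1000,1115)). Keeping
// any two of those ranges exceeds the limit, hence the final
// EvictCodedFrames() call returns false while still preserving both required
// ranges.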
TEST_F(ChunkDemuxerTest, GCKeepPlayhead) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);

  // Append data at the start that can be garbage collected:
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
  CheckExpectedRanges(kSourceId, "{ [0,230) }");

  // We expect garbage collection to fail, as we don't want to spontaneously
  // create gaps in the source buffer stream. Gaps could break playback for
  // many clients who don't bother to check ranges after an append.
  EXPECT_FALSE(demuxer_->EvictCodedFrames(
      kSourceId, base::TimeDelta::FromMilliseconds(0), 0));
  CheckExpectedRanges(kSourceId, "{ [0,230) }");

  // Increase media_time a bit. This allows some data to be collected, but we
  // are still over the memory usage limit.
  base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(23 * 2);
  Seek(seek_time1);
  EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time1, 0));
  CheckExpectedRanges(kSourceId, "{ [46,230) }");

  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(23 * 4);
  Seek(seek_time2);
  EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
  CheckExpectedRanges(kSourceId, "{ [92,230) }");

  // media_time has progressed to a point where we can collect enough data to
  // be under the memory limit, so Evict should return true.
  base::TimeDelta seek_time3 = base::TimeDelta::FromMilliseconds(23 * 6);
  Seek(seek_time3);
  EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time3, 0));
  // Strictly speaking the current playback time is 23 * 6 == 138ms, so we
  // could release data up to 138ms, but we only release as much data as
  // necessary to bring memory usage under the limit, so we release only up to
  // 115ms.
  CheckExpectedRanges(kSourceId, "{ [115,230) }");
}
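// Arithmetic recap: ten 10-byte blocks (100 bytes) were appended against a
// 50-byte limit, so five blocks have to be released before eviction can
// succeed. Five audio blocks cover 5 * 23 = 115 ms, which is why the playhead
// must reach at least 115 ms (here 138 ms) before EvictCodedFrames() returns
// true, and why the surviving range starts at 115 ms.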
TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [50,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180 210 240K 270 300 330K");

  // Verify that GOPs that start outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are not included.
  CheckExpectedRanges(kSourceId, "{ [120,270) }");
  CheckExpectedBuffers(stream, "120K 150 180 210 240K");

  // Extend the append window to [50,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that the added buffers start at the next
  // key frame.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "360 390 420K 450 480 510 540K 570 600 630K");
  CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
}
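// In the block strings above, the numbers are timestamps in milliseconds
// (30 ms apart for this video track) and "K" marks key frames. The GOP
// starting at 0 begins before the 50 ms window start, so nothing is kept
// until the next key frame at 120; the frame at 270 starts inside the window
// but extends past the 280 ms window end, so the kept range stops at 270.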
TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the append window to [50,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");

  // Verify that frames that end outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are not included.
  //
  // The first 50ms of the range should be truncated since it overlaps
  // the start of the append window.
  CheckExpectedRanges(kSourceId, "{ [50,280) }");

  // The "50P" buffer is the "0" buffer marked for complete discard. The next
  // "50" buffer is the "30" buffer marked with 20ms of start discard.
  CheckExpectedBuffers(stream, "50KP 50K 60K 90K 120K 150K 180K 210K 240K");

  // Extend the append window to [50,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that a new range is created.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
  CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  // Set the append window to [10,20).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");

  // Verify the append is clipped to the append window.
  CheckExpectedRanges(kSourceId, "{ [10,20) }");
}
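// The single "0K" block presumably spans [0,23) (the default
// kAudioBlockDuration), so it overlaps both edges of the [10,20) append
// window and is trimmed on both sides, leaving exactly the windowed interval
// buffered.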
TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_,
      CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
      true);
  ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));

  // Set the append window to [50,150).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);

  // Read a WebM file into memory and send the data to the demuxer. The chunk
  // size has been chosen carefully to ensure the preroll buffer used by the
  // partial append window trim must come from a previous Append() call.
  scoped_refptr<DecoderBuffer> buffer =
      ReadTestDataFile("bear-320x240-audio-only.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  AppendDataInPieces(buffer->data(), buffer->data_size(), 128);

  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  CheckExpectedBuffers(stream, "50KP 50K 62K 86K 109K 122K 125K 128K");
}
TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_,
      CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
      true);
  ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));

  // Set the append window such that the first file is completely before the
  // append window.
  // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
  // have the correct duration in their init segments, and the
  // CreateInitDoneCB() call, above, is fixed to use that duration. See
  // http://crbug.com/354284.
  const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
  append_window_start_for_next_append_ = duration_1;

  // Read a WebM file into memory and append the data.
  scoped_refptr<DecoderBuffer> buffer =
      ReadTestDataFile("bear-320x240-audio-only.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
  CheckExpectedRanges(kSourceId, "{ }");

  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  AudioDecoderConfig config_1 = stream->audio_decoder_config();

  // Read in a second WebM with a different config and append the data.
  scoped_refptr<DecoderBuffer> buffer2 =
      ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
  ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
  AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
  CheckExpectedRanges(kSourceId, "{ [2746,5519) }");

  Seek(duration_1);
  ExpectConfigChanged(DemuxerStream::AUDIO);
  ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
  CheckExpectedBuffers(stream, "2746K 2767K 2789K 2810K");
}
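// Because append_window_start_for_next_append_ equals the first file's
// duration, every coded frame from the first append falls entirely before the
// window and is discarded, which is why the buffered range stays empty
// ("{ }") until the second file is appended with a matching timestamp offset.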
TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [20,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append
  // window.
  AppendMuxedCluster(
      MuxedStreamInfo(kVideoTrackNum,
                      "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
      MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K"));

  // Verify that text cues that start outside the window are not included
  // in the buffer. Also verify that cues that extend beyond the
  // window are not included.
  CheckExpectedRanges(kSourceId, "{ [100,270) }");
  CheckExpectedBuffers(video_stream, "120K 150 180 210 240K");
  CheckExpectedBuffers(text_stream, "100K");

  // Extend the append window to [20,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that a new range is created.
  AppendMuxedCluster(
      MuxedStreamInfo(kVideoTrackNum,
                      "360 390 420K 450 480 510 540K 570 600 630K"),
      MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K"));
  CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");

  // Seek to the new range and verify that the expected buffers are returned.
  Seek(base::TimeDelta::FromMilliseconds(420));
  CheckExpectedBuffers(video_stream, "420K 450 480 510 540K 570 600");
  CheckExpectedBuffers(text_stream, "400K 500K");
}
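// Text cues are kTextBlockDuration = 100 ms long here: the cue at 0 starts
// before the 20 ms window start, and the cues at 200 and 300 extend to or
// start past the 280 ms window end, so only the cue at 100 survives the first
// append. That is why the combined buffered range starts at 100 rather than
// at the first kept video key frame (120).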
TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendGarbage();
  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
  demuxer_->StartWaitingForSeek(seek_time);
}
TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
      MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
      MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));

  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
  CheckExpectedBuffers(video_stream, "0K 30 60 90 120K 150 180");
  CheckExpectedBuffers(text_stream, "0K 100K 200K");

  // Remove the buffers that were added.
  demuxer_->Remove(kSourceId, base::TimeDelta(),
                   base::TimeDelta::FromMilliseconds(300));

  // Verify that all the appended data has been removed.
  CheckExpectedRanges(kSourceId, "{ }");

  // Append new buffers that are clearly different than the original
  // ones and verify that only the new buffers are returned.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
      MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
      MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));

  Seek(base::TimeDelta());
  CheckExpectedBuffers(audio_stream, "1K 21K 41K 61K 81K 101K 121K 141K");
  CheckExpectedBuffers(video_stream, "1K 31 61 91 121K 151 181");
  CheckExpectedBuffers(text_stream, "1K 101K 201K");
}
TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the duration to something small so that the append that
  // follows updates the duration to reflect the end of the appended data.
  EXPECT_CALL(host_, SetDuration(
      base::TimeDelta::FromMilliseconds(1)));
  demuxer_->SetDuration(0.001);

  EXPECT_CALL(host_, SetDuration(
      base::TimeDelta::FromMilliseconds(160)));
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "0K 20K 40K 60K 80K 100K 120K 140K");

  CheckExpectedRanges(kSourceId, "{ [0,160) }");
  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");

  demuxer_->Remove(kSourceId,
                   base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
                   kInfiniteDuration());

  Seek(base::TimeDelta());
  CheckExpectedRanges(kSourceId, "{ [0,160) }");
  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
}
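// Since the append updated the duration to 160 ms, the Remove() call above
// covers [160 ms, +infinity), which contains no buffered data; the buffered
// range and the buffers returned afterwards are therefore unchanged.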
// Verifies that a Seek() will complete without text cues for
// the seek point and will return cues after the seek position
// when they are eventually appended.
TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
  bool seek_cb_was_called = false;
  demuxer_->StartWaitingForSeek(seek_time);
  demuxer_->Seek(seek_time,
                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
  message_loop_.RunUntilIdle();

  EXPECT_FALSE(seek_cb_was_called);

  bool text_read_done = false;
  text_stream->Read(base::Bind(&OnReadDone,
                               base::TimeDelta::FromMilliseconds(225),
                               &text_read_done));

  // Append audio & video data so the seek completes.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum,
                      "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
      MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(seek_cb_was_called);
  EXPECT_FALSE(text_read_done);

  // Read some audio & video buffers to further verify seek completion.
  CheckExpectedBuffers(audio_stream, "120K 140K");
  CheckExpectedBuffers(video_stream, "120K 150");

  EXPECT_FALSE(text_read_done);

  // Append text cues that start after the seek point and verify that
  // they are returned by Read() calls.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
      MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
      MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(text_read_done);

  // NOTE: we start at 275 here because the buffer at 225 was returned
  // to the pending read initiated above.
  CheckExpectedBuffers(text_stream, "275K 325K");

  // Verify that audio & video streams continue to return expected values.
  CheckExpectedBuffers(audio_stream, "160K 180K");
  CheckExpectedBuffers(video_stream, "180 210");
}
TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(GenerateCluster(0, 0, 4, true));
  CheckExpectedRanges(kSourceId, "{ [0,46) }");

  // A new cluster indicates the end of the previous cluster with unknown size.
  AppendCluster(GenerateCluster(46, 66, 5, true));
  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}
TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Add two clusters separated by Cues in a single Append() call.
  scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
  std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
  data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
  cluster = GenerateCluster(46, 66, 5, true);
  data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
  AppendData(&*data.begin(), data.size());

  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}
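// The Cues element between the two unknown-size clusters plays the same role
// as the second cluster did in the previous test: it terminates the open
// cluster, so both clusters parse and the resulting range matches
// ClusterWithUnknownSize.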
TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(GenerateCluster(0, 0, 4));
  AppendData(kCuesHeader, sizeof(kCuesHeader));
  AppendCluster(GenerateCluster(46, 66, 5));
  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}
TEST_F(ChunkDemuxerTest, EvictCodedFramesTest) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
  demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 15 * kBlockSize);
  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  const char* kAudioStreamInfo = "0K 40K 80K 120K 160K 200K 240K 280K";
  const char* kVideoStreamInfo = "0K 10 20K 30 40K 50 60K 70 80K 90 100K "
                                 "110 120K 130 140K";
  // Append 8 blocks (80 bytes) of data to the audio stream and 15 blocks
  // (150 bytes) to the video stream.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, kAudioStreamInfo),
      MuxedStreamInfo(kVideoTrackNum, kVideoStreamInfo));
  CheckExpectedBuffers(audio_stream, kAudioStreamInfo);
  CheckExpectedBuffers(video_stream, kVideoStreamInfo);

  // If we want to append 80 more bytes (8 blocks) of muxed a+v data while the
  // current position is 0, eviction will fail, because EvictCodedFrames won't
  // remove data after the current playback position.
  ASSERT_FALSE(demuxer_->EvictCodedFrames(
      kSourceId, base::TimeDelta::FromMilliseconds(0), 80));
  // EvictCodedFrames has failed, so the data should be unchanged.
  Seek(base::TimeDelta::FromMilliseconds(0));
  CheckExpectedBuffers(audio_stream, kAudioStreamInfo);
  CheckExpectedBuffers(video_stream, kVideoStreamInfo);

  // But if we pretend that the playback position has moved to 120ms, that
  // allows EvictCodedFrames to garbage-collect enough data to succeed.
  ASSERT_TRUE(demuxer_->EvictCodedFrames(
      kSourceId, base::TimeDelta::FromMilliseconds(120), 80));

  Seek(base::TimeDelta::FromMilliseconds(0));
  // The audio stream had 8 buffers and the video stream had 15. We told
  // EvictCodedFrames that the new data size is 8 muxed blocks, i.e. 80 bytes.
  // Given the current ratio of video to the total data size
  // (15 : (8 + 15) ~= 0.65), the estimated sizes of video and audio data in
  // the new 80-byte chunk are 52 bytes for video (80 * 0.65 = 52) and 28
  // bytes for audio (80 - 52).
  // Given these numbers, MSE GC removes just one audio block: the current
  // audio size is 80 bytes and the new data is 28 bytes, so dropping a single
  // 10-byte block keeps us under the 100-byte limit after the append
  // (80 - 10 + 28 = 98).
  // For the video stream, 150 + 52 = 202 against a 150-byte limit, so at
  // least 6 blocks must be removed to stay under the limit.
  CheckExpectedBuffers(audio_stream, "40K 80K 120K 160K 200K 240K 280K");
  CheckExpectedBuffers(video_stream, "60K 70 80K 90 100K 110 120K 130 140K");
}

}  // namespace media