// media/filters/chunk_demuxer_unittest.cc

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <string.h>

#include <algorithm>
#include <map>
#include <queue>
#include <sstream>
#include <string>
#include <vector>

#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
#include "media/base/decrypt_config.h"
#include "media/base/media_log.h"
#include "media/base/mock_demuxer_host.h"
#include "media/base/test_data_util.h"
#include "media/base/test_helpers.h"
#include "media/base/timestamp_constants.h"
#include "media/filters/chunk_demuxer.h"
#include "media/formats/webm/cluster_builder.h"
#include "media/formats/webm/webm_constants.h"
#include "testing/gtest/include/gtest/gtest.h"

using ::testing::AnyNumber;
using ::testing::Exactly;
using ::testing::InSequence;
using ::testing::NotNull;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::SetArgumentPointee;
using ::testing::_;

namespace media {

const uint8 kTracksHeader[] = {
  0x16, 0x54, 0xAE, 0x6B,                          // Tracks ID
  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // tracks(size = 0)
};

// WebM Block bytes that represent a VP8 key frame.
const uint8 kVP8Keyframe[] = {
  0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
};

// WebM Block bytes that represent a VP8 interframe.
const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };

const uint8 kCuesHeader[] = {
  0x1C, 0x53, 0xBB, 0x6B,                          // Cues ID
  0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  // cues(size = 0)
};

const uint8 kEncryptedMediaInitData[] = {
  0x68, 0xFE, 0xF9, 0xA1, 0xB3, 0x0D, 0x6B, 0x4D,
  0xF2, 0x22, 0xB5, 0x0B, 0x4D, 0xE9, 0xE9, 0x95,
};

const int kTracksHeaderSize = sizeof(kTracksHeader);
const int kTracksSizeOffset = 4;

// The size of TrackEntry element in test file "webm_vorbis_track_entry" starts
// at index 1 and spans 8 bytes.
const int kAudioTrackSizeOffset = 1;
const int kAudioTrackSizeWidth = 8;
const int kAudioTrackEntryHeaderSize =
    kAudioTrackSizeOffset + kAudioTrackSizeWidth;

// The size of TrackEntry element in test file "webm_vp8_track_entry" starts at
// index 1 and spans 8 bytes.
const int kVideoTrackSizeOffset = 1;
const int kVideoTrackSizeWidth = 8;
const int kVideoTrackEntryHeaderSize =
    kVideoTrackSizeOffset + kVideoTrackSizeWidth;

const int kVideoTrackNum = 1;
const int kAudioTrackNum = 2;
const int kTextTrackNum = 3;
const int kAlternateTextTrackNum = 4;

const int kAudioBlockDuration = 23;
const int kVideoBlockDuration = 33;
const int kTextBlockDuration = 100;
const int kBlockSize = 10;

const char kSourceId[] = "SourceId";
const char kDefaultFirstClusterRange[] = "{ [0,46) }";
const int kDefaultFirstClusterEndTimestamp = 66;
const int kDefaultSecondClusterEndTimestamp = 132;

base::TimeDelta kDefaultDuration() {
  return base::TimeDelta::FromMilliseconds(201224);
}

// Write an integer into buffer in the form of vint that spans 8 bytes.
// The data pointed by |buffer| should be at least 8 bytes long.
// |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
static void WriteInt64(uint8* buffer, int64 number) {
  DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
  buffer[0] = 0x01;
  int64 tmp = number;
  for (int i = 7; i > 0; i--) {
    buffer[i] = tmp & 0xff;
    tmp >>= 8;
  }
}
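
// For example, WriteInt64(buffer, 0x0102) fills |buffer| with
// 0x01 0x00 0x00 0x00 0x00 0x00 0x01 0x02: the leading 0x01 marker that makes
// this an 8-byte vint, followed by the value in big-endian order across the
// remaining seven bytes.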

MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
  return arg.get() && !arg->end_of_stream() &&
         arg->timestamp().InMilliseconds() == timestamp_in_ms;
}

MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }

static void OnReadDone(const base::TimeDelta& expected_time,
                       bool* called,
                       DemuxerStream::Status status,
                       const scoped_refptr<DecoderBuffer>& buffer) {
  EXPECT_EQ(status, DemuxerStream::kOk);
  EXPECT_EQ(expected_time, buffer->timestamp());
  *called = true;
}

static void OnReadDone_AbortExpected(
    bool* called, DemuxerStream::Status status,
    const scoped_refptr<DecoderBuffer>& buffer) {
  EXPECT_EQ(status, DemuxerStream::kAborted);
  EXPECT_EQ(NULL, buffer.get());
  *called = true;
}

static void OnReadDone_EOSExpected(bool* called,
                                   DemuxerStream::Status status,
                                   const scoped_refptr<DecoderBuffer>& buffer) {
  EXPECT_EQ(status, DemuxerStream::kOk);
  EXPECT_TRUE(buffer->end_of_stream());
  *called = true;
}

static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
  EXPECT_EQ(status, PIPELINE_OK);
  *called = true;
}

class ChunkDemuxerTest : public ::testing::Test {
 protected:
  enum CodecsIndex {
    AUDIO,
    VIDEO,
    MAX_CODECS_INDEX
  };

  // Default cluster to append first for simple tests.
  scoped_ptr<Cluster> kDefaultFirstCluster() {
    return GenerateCluster(0, 4);
  }

  // Default cluster to append after kDefaultFirstCluster()
  // has been appended. This cluster starts with blocks that
  // have timestamps consistent with the end times of the blocks
  // in kDefaultFirstCluster() so that these two clusters represent
  // a continuous region.
  scoped_ptr<Cluster> kDefaultSecondCluster() {
    return GenerateCluster(46, 66, 5);
  }

  ChunkDemuxerTest()
      : append_window_end_for_next_append_(kInfiniteDuration()) {
    init_segment_received_cb_ =
        base::Bind(&ChunkDemuxerTest::InitSegmentReceived,
                   base::Unretained(this));
    CreateNewDemuxer();
  }

  void CreateNewDemuxer() {
    base::Closure open_cb =
        base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
    Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb = base::Bind(
        &ChunkDemuxerTest::OnEncryptedMediaInitData, base::Unretained(this));
    demuxer_.reset(new ChunkDemuxer(open_cb, encrypted_media_init_data_cb,
                                    scoped_refptr<MediaLog>(new MediaLog()),
                                    true));
  }

  virtual ~ChunkDemuxerTest() {
    ShutdownDemuxer();
  }

  void CreateInitSegment(int stream_flags,
                         bool is_audio_encrypted,
                         bool is_video_encrypted,
                         scoped_ptr<uint8[]>* buffer,
                         int* size) {
    CreateInitSegmentInternal(
        stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
        size);
  }

  void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
                                                  bool is_audio_encrypted,
                                                  bool is_video_encrypted,
                                                  scoped_ptr<uint8[]>* buffer,
                                                  int* size) {
    DCHECK(stream_flags & HAS_TEXT);
    CreateInitSegmentInternal(
        stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
        size);
  }

  void CreateInitSegmentInternal(int stream_flags,
                                 bool is_audio_encrypted,
                                 bool is_video_encrypted,
                                 scoped_ptr<uint8[]>* buffer,
                                 bool use_alternate_text_track_id,
                                 int* size) {
    bool has_audio = (stream_flags & HAS_AUDIO) != 0;
    bool has_video = (stream_flags & HAS_VIDEO) != 0;
    bool has_text = (stream_flags & HAS_TEXT) != 0;
    scoped_refptr<DecoderBuffer> ebml_header;
    scoped_refptr<DecoderBuffer> info;
    scoped_refptr<DecoderBuffer> audio_track_entry;
    scoped_refptr<DecoderBuffer> video_track_entry;
    scoped_refptr<DecoderBuffer> audio_content_encodings;
    scoped_refptr<DecoderBuffer> video_content_encodings;
    scoped_refptr<DecoderBuffer> text_track_entry;

    ebml_header = ReadTestDataFile("webm_ebml_element");

    info = ReadTestDataFile("webm_info_element");

    int tracks_element_size = 0;

    if (has_audio) {
      audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
      tracks_element_size += audio_track_entry->data_size();
      if (is_audio_encrypted) {
        audio_content_encodings = ReadTestDataFile("webm_content_encodings");
        tracks_element_size += audio_content_encodings->data_size();
      }
    }

    if (has_video) {
      video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
      tracks_element_size += video_track_entry->data_size();
      if (is_video_encrypted) {
        video_content_encodings = ReadTestDataFile("webm_content_encodings");
        tracks_element_size += video_content_encodings->data_size();
      }
    }

    if (has_text) {
      // TODO(matthewjheaney): create an abstraction to do
      // this (http://crbug/321454).
      // We need it to also handle the creation of multiple text tracks.
      //
      // This is the track entry for a text track,
      // TrackEntry [AE], size=30
      //   TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
      //   TrackUID [73] [C5], size=1, value=3 (must remain constant for same
      //   track, even if TrackNum changes)
      //   TrackType [83], size=1, val=0x11
      //   CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
      char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
                   "\x83\x81\x11\x86\x92"
                   "D_WEBVTT/SUBTITLES";
      DCHECK_EQ(str[4], kTextTrackNum);
      if (use_alternate_text_track_id)
        str[4] = kAlternateTextTrackNum;

      const int len = strlen(str);
      DCHECK_EQ(len, 32);
      const uint8* const buf = reinterpret_cast<const uint8*>(str);
      text_track_entry = DecoderBuffer::CopyFrom(buf, len);
      tracks_element_size += text_track_entry->data_size();
    }

    *size = ebml_header->data_size() + info->data_size() +
        kTracksHeaderSize + tracks_element_size;

    buffer->reset(new uint8[*size]);

    uint8* buf = buffer->get();
    memcpy(buf, ebml_header->data(), ebml_header->data_size());
    buf += ebml_header->data_size();

    memcpy(buf, info->data(), info->data_size());
    buf += info->data_size();

    memcpy(buf, kTracksHeader, kTracksHeaderSize);
    WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
    buf += kTracksHeaderSize;

    // TODO(xhwang): Simplify this! Probably have test data files that contain
    // ContentEncodings directly instead of trying to create one at run-time.
    if (has_audio) {
      memcpy(buf, audio_track_entry->data(),
             audio_track_entry->data_size());
      if (is_audio_encrypted) {
        memcpy(buf + audio_track_entry->data_size(),
               audio_content_encodings->data(),
               audio_content_encodings->data_size());
        WriteInt64(buf + kAudioTrackSizeOffset,
                   audio_track_entry->data_size() +
                   audio_content_encodings->data_size() -
                   kAudioTrackEntryHeaderSize);
        buf += audio_content_encodings->data_size();
      }
      buf += audio_track_entry->data_size();
    }

    if (has_video) {
      memcpy(buf, video_track_entry->data(),
             video_track_entry->data_size());
      if (is_video_encrypted) {
        memcpy(buf + video_track_entry->data_size(),
               video_content_encodings->data(),
               video_content_encodings->data_size());
        WriteInt64(buf + kVideoTrackSizeOffset,
                   video_track_entry->data_size() +
                   video_content_encodings->data_size() -
                   kVideoTrackEntryHeaderSize);
        buf += video_content_encodings->data_size();
      }
      buf += video_track_entry->data_size();
    }

    if (has_text) {
      memcpy(buf, text_track_entry->data(),
             text_track_entry->data_size());
      buf += text_track_entry->data_size();
    }
  }

  ChunkDemuxer::Status AddId() {
    return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
  }

  ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
    bool has_audio = (stream_flags & HAS_AUDIO) != 0;
    bool has_video = (stream_flags & HAS_VIDEO) != 0;
    std::vector<std::string> codecs;
    std::string type;

    if (has_audio) {
      codecs.push_back("vorbis");
      type = "audio/webm";
    }

    if (has_video) {
      codecs.push_back("vp8");
      type = "video/webm";
    }

    if (!has_audio && !has_video) {
      return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
    }

    return demuxer_->AddId(source_id, type, codecs);
  }

  ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
    std::vector<std::string> codecs;
    std::string type = "video/mp2t";
    codecs.push_back("mp4a.40.2");
    codecs.push_back("avc1.640028");
    return demuxer_->AddId(source_id, type, codecs);
  }

  void AppendData(const uint8* data, size_t length) {
    AppendData(kSourceId, data, length);
  }

  void AppendCluster(const std::string& source_id,
                     scoped_ptr<Cluster> cluster) {
    AppendData(source_id, cluster->data(), cluster->size());
  }

  void AppendCluster(scoped_ptr<Cluster> cluster) {
    AppendCluster(kSourceId, cluster.Pass());
  }

  void AppendCluster(int timecode, int block_count) {
    AppendCluster(GenerateCluster(timecode, block_count));
  }

  void AppendSingleStreamCluster(const std::string& source_id, int track_number,
                                 int timecode, int block_count) {
    int block_duration = 0;
    switch (track_number) {
      case kVideoTrackNum:
        block_duration = kVideoBlockDuration;
        break;
      case kAudioTrackNum:
        block_duration = kAudioBlockDuration;
        break;
      case kTextTrackNum:  // Fall-through.
      case kAlternateTextTrackNum:
        block_duration = kTextBlockDuration;
        break;
    }
    ASSERT_NE(block_duration, 0);
    int end_timecode = timecode + block_count * block_duration;
    AppendCluster(source_id,
                  GenerateSingleStreamCluster(
                      timecode, end_timecode, track_number, block_duration));
  }
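
  // Example: with kAudioBlockDuration == 23,
  // AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 4) appends audio
  // blocks at 0, 23, 46 and 69 ms, i.e. the buffered range [0,92).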

  struct BlockInfo {
    BlockInfo()
        : track_number(0),
          timestamp_in_ms(0),
          flags(0),
          duration(0) {
    }

    BlockInfo(int tn, int ts, int f, int d)
        : track_number(tn),
          timestamp_in_ms(ts),
          flags(f),
          duration(d) {
    }

    int track_number;
    int timestamp_in_ms;
    int flags;
    int duration;

    bool operator< (const BlockInfo& rhs) const {
      return timestamp_in_ms < rhs.timestamp_in_ms;
    }
  };

  // |track_number| - The track number to assign to each parsed block.
  // |block_descriptions| - A space delimited string of block info that
  //  is used to populate |blocks|. Each block info has a timestamp in
  //  milliseconds, optionally followed by a 'K' to indicate that the block
  //  should be marked as a key frame. For example "0K 30 60" should populate
  //  |blocks| with 3 BlockInfo objects: a key frame with timestamp 0 and 2
  //  non-key-frames at 30ms and 60ms.
  void ParseBlockDescriptions(int track_number,
                              const std::string block_descriptions,
                              std::vector<BlockInfo>* blocks) {
    std::vector<std::string> timestamps = base::SplitString(
        block_descriptions, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);

    for (size_t i = 0; i < timestamps.size(); ++i) {
      std::string timestamp_str = timestamps[i];
      BlockInfo block_info;
      block_info.track_number = track_number;
      block_info.flags = 0;
      block_info.duration = 0;

      if (base::EndsWith(timestamp_str, "K", base::CompareCase::SENSITIVE)) {
        block_info.flags = kWebMFlagKeyframe;
        // Remove the "K" off of the token.
        timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
      }
      CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));

      if (track_number == kTextTrackNum ||
          track_number == kAlternateTextTrackNum) {
        block_info.duration = kTextBlockDuration;
        ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
            << "Text block with timestamp " << block_info.timestamp_in_ms
            << " was not marked as a key frame."
            << " All text blocks must be key frames";
      }

      if (track_number == kAudioTrackNum)
        ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);

      blocks->push_back(block_info);
    }
  }
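
  // Example: ParseBlockDescriptions(kVideoTrackNum, "0K 33 66", &blocks)
  // produces three BlockInfo entries for the video track at 0, 33 and 66 ms,
  // with only the first flagged as a key frame.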

  scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
                                      bool unknown_size) {
    DCHECK_GT(blocks.size(), 0u);
    ClusterBuilder cb;

    std::vector<uint8> data(10);
    for (size_t i = 0; i < blocks.size(); ++i) {
      if (i == 0)
        cb.SetClusterTimecode(blocks[i].timestamp_in_ms);

      if (blocks[i].duration) {
        if (blocks[i].track_number == kVideoTrackNum) {
          AddVideoBlockGroup(&cb,
                             blocks[i].track_number, blocks[i].timestamp_in_ms,
                             blocks[i].duration, blocks[i].flags);
        } else {
          cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
                           blocks[i].duration, blocks[i].flags,
                           &data[0], data.size());
        }
      } else {
        cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
                          blocks[i].flags,
                          &data[0], data.size());
      }
    }

    return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
  }

  scoped_ptr<Cluster> GenerateCluster(
      std::priority_queue<BlockInfo> block_queue,
      bool unknown_size) {
    std::vector<BlockInfo> blocks(block_queue.size());
    for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
      blocks[i] = block_queue.top();
      block_queue.pop();
    }

    return GenerateCluster(blocks, unknown_size);
  }

  // |block_descriptions| - The block descriptions used to construct the
  // cluster. See the documentation for ParseBlockDescriptions() for details on
  // the string format.
  void AppendSingleStreamCluster(const std::string& source_id, int track_number,
                                 const std::string& block_descriptions) {
    std::vector<BlockInfo> blocks;
    ParseBlockDescriptions(track_number, block_descriptions, &blocks);
    AppendCluster(source_id, GenerateCluster(blocks, false));
  }

  struct MuxedStreamInfo {
    MuxedStreamInfo()
        : track_number(0),
          block_descriptions("")
    {}

    MuxedStreamInfo(int track_num, const char* block_desc)
        : track_number(track_num),
          block_descriptions(block_desc) {
    }

    int track_number;
    // The block description passed to ParseBlockDescriptions().
    // See the documentation for that method for details on the string format.
    const char* block_descriptions;
  };

  void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
                          const MuxedStreamInfo& msi_2) {
    std::vector<MuxedStreamInfo> msi(2);
    msi[0] = msi_1;
    msi[1] = msi_2;
    AppendMuxedCluster(msi);
  }

  void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
                          const MuxedStreamInfo& msi_2,
                          const MuxedStreamInfo& msi_3) {
    std::vector<MuxedStreamInfo> msi(3);
    msi[0] = msi_1;
    msi[1] = msi_2;
    msi[2] = msi_3;
    AppendMuxedCluster(msi);
  }

  void AppendMuxedCluster(const std::vector<MuxedStreamInfo> msi) {
    std::priority_queue<BlockInfo> block_queue;
    for (size_t i = 0; i < msi.size(); ++i) {
      std::vector<BlockInfo> track_blocks;
      ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
                             &track_blocks);

      for (size_t j = 0; j < track_blocks.size(); ++j)
        block_queue.push(track_blocks[j]);
    }
    AppendCluster(kSourceId, GenerateCluster(block_queue, false));
  }

  void AppendData(const std::string& source_id,
                  const uint8* data, size_t length) {
    EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());

    demuxer_->AppendData(source_id, data, length,
                         append_window_start_for_next_append_,
                         append_window_end_for_next_append_,
                         &timestamp_offset_map_[source_id],
                         init_segment_received_cb_);
  }

  void AppendDataInPieces(const uint8* data, size_t length) {
    AppendDataInPieces(data, length, 7);
  }

  void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
    const uint8* start = data;
    const uint8* end = data + length;
    while (start < end) {
      size_t append_size = std::min(piece_size,
                                    static_cast<size_t>(end - start));
      AppendData(start, append_size);
      start += append_size;
    }
  }
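
  // The 7-byte default piece size exercises the parser's handling of data
  // that arrives in arbitrary small chunks rather than whole clusters.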

  void AppendInitSegment(int stream_flags) {
    AppendInitSegmentWithSourceId(kSourceId, stream_flags);
  }

  void AppendInitSegmentWithSourceId(const std::string& source_id,
                                     int stream_flags) {
    AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
  }

  void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
                                          int stream_flags,
                                          bool is_audio_encrypted,
                                          bool is_video_encrypted) {
    scoped_ptr<uint8[]> info_tracks;
    int info_tracks_size = 0;
    CreateInitSegment(stream_flags,
                      is_audio_encrypted, is_video_encrypted,
                      &info_tracks, &info_tracks_size);
    AppendData(source_id, info_tracks.get(), info_tracks_size);
  }

  void AppendGarbage() {
    // Fill up an array with gibberish.
    int garbage_cluster_size = 10;
    scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
    for (int i = 0; i < garbage_cluster_size; ++i)
      garbage_cluster[i] = i;
    AppendData(garbage_cluster.get(), garbage_cluster_size);
  }

  void InitDoneCalled(PipelineStatus expected_status,
                      PipelineStatus status) {
    EXPECT_EQ(status, expected_status);
  }

  void AppendEmptyCluster(int timecode) {
    AppendCluster(GenerateEmptyCluster(timecode));
  }

  PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
                                    PipelineStatus expected_status) {
    if (expected_duration != kNoTimestamp())
      EXPECT_CALL(host_, SetDuration(expected_duration));
    return CreateInitDoneCB(expected_status);
  }

  PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
    return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
                      base::Unretained(this),
                      expected_status);
  }

  enum StreamFlags {
    HAS_AUDIO = 1 << 0,
    HAS_VIDEO = 1 << 1,
    HAS_TEXT = 1 << 2
  };

  bool InitDemuxer(int stream_flags) {
    return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
  }

  bool InitDemuxerWithEncryptionInfo(
      int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
    PipelineStatus expected_status =
        (stream_flags != 0) ? PIPELINE_OK : PIPELINE_ERROR_DECODE;

    base::TimeDelta expected_duration = kNoTimestamp();
    if (expected_status == PIPELINE_OK)
      expected_duration = kDefaultDuration();

    EXPECT_CALL(*this, DemuxerOpened());

    // Adding expectation prior to CreateInitDoneCB() here because InSequence
    // tests require init segment received before duration set. Also, only
    // expect an init segment received callback if there is actually a track in
    // it.
    if (stream_flags != 0)
      EXPECT_CALL(*this, InitSegmentReceived());

    demuxer_->Initialize(
        &host_, CreateInitDoneCB(expected_duration, expected_status), true);

    if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
      return false;

    AppendInitSegmentWithEncryptedInfo(
        kSourceId, stream_flags,
        is_audio_encrypted, is_video_encrypted);
    return true;
  }

  bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
                                           const std::string& video_id,
                                           bool has_text) {
    EXPECT_CALL(*this, DemuxerOpened());
    demuxer_->Initialize(
        &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);

    if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
      return false;
    if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
      return false;

    int audio_flags = HAS_AUDIO;
    int video_flags = HAS_VIDEO;

    if (has_text) {
      audio_flags |= HAS_TEXT;
      video_flags |= HAS_TEXT;
    }

    EXPECT_CALL(*this, InitSegmentReceived());
    AppendInitSegmentWithSourceId(audio_id, audio_flags);
    EXPECT_CALL(*this, InitSegmentReceived());
    AppendInitSegmentWithSourceId(video_id, video_flags);
    return true;
  }

  bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
                                       const std::string& video_id) {
    return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
  }

  // Initializes the demuxer with data from 2 files with different
  // decoder configurations. This is used to test the decoder config change
  // logic.
  //
  // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
  // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
  // The resulting video stream returns data from each file for the following
  // time ranges.
  // bear-320x240.webm : [0-501)       [801-2736)
  // bear-640x360.webm :       [527-793)
  //
  // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
  // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
  // The resulting audio stream returns data from each file for the following
  // time ranges.
  // bear-320x240.webm : [0-524)       [779-2736)
  // bear-640x360.webm :       [527-759)
  bool InitDemuxerWithConfigChangeData() {
    scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
    scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");

    EXPECT_CALL(*this, DemuxerOpened());

    // Adding expectation prior to CreateInitDoneCB() here because InSequence
    // tests require init segment received before duration set.
    EXPECT_CALL(*this, InitSegmentReceived());
    demuxer_->Initialize(
        &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
                                 PIPELINE_OK), true);

    if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
      return false;

    // Append the whole bear1 file.
    // Expect duration adjustment since actual duration differs slightly from
    // duration in the init segment.
    EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
    AppendData(bear1->data(), bear1->data_size());
    // Last audio frame has timestamp 2721 and duration 24 (estimated from max
    // seen so far for audio track).
    // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
    // DefaultDuration for video track).
    CheckExpectedRanges(kSourceId, "{ [0,2736) }");

    // Append initialization segment for bear2.
    // Note: Offsets here and below are derived from
    // media/test/data/bear-640x360-manifest.js and
    // media/test/data/bear-320x240-manifest.js which were
    // generated from media/test/data/bear-640x360.webm and
    // media/test/data/bear-320x240.webm respectively.
    EXPECT_CALL(*this, InitSegmentReceived());
    AppendData(bear2->data(), 4340);

    // Append a media segment that goes from [0.527000, 1.014000).
    AppendData(bear2->data() + 55290, 18785);
    CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");

    // Append initialization segment for bear1 & fill gap with [779-1197)
    // segment.
    EXPECT_CALL(*this, InitSegmentReceived());
    AppendData(bear1->data(), 4370);
    AppendData(bear1->data() + 72737, 28183);
    CheckExpectedRanges(kSourceId, "{ [0,2736) }");

    MarkEndOfStream(PIPELINE_OK);
    return true;
  }

  void ShutdownDemuxer() {
    if (demuxer_) {
      demuxer_->Shutdown();
      message_loop_.RunUntilIdle();
    }
  }

  void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
    uint8 data[] = { 0x00 };
    cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
  }

  scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
    return GenerateCluster(timecode, timecode, block_count);
  }

  void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
                          int duration, int flags) {
    const uint8* data =
        (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
    int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
        sizeof(kVP8Interframe);
    cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
  }

  scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
                                      int first_video_timecode,
                                      int block_count) {
    return GenerateCluster(first_audio_timecode, first_video_timecode,
                           block_count, false);
  }

  scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
                                      int first_video_timecode,
                                      int block_count,
                                      bool unknown_size) {
    CHECK_GT(block_count, 0);

    std::priority_queue<BlockInfo> block_queue;

    if (block_count == 1) {
      block_queue.push(BlockInfo(kAudioTrackNum,
                                 first_audio_timecode,
                                 kWebMFlagKeyframe,
                                 kAudioBlockDuration));
      return GenerateCluster(block_queue, unknown_size);
    }

    int audio_timecode = first_audio_timecode;
    int video_timecode = first_video_timecode;

    // Create simple blocks for everything except the last 2 blocks.
    // The first video frame must be a key frame.
    uint8 video_flag = kWebMFlagKeyframe;
    for (int i = 0; i < block_count - 2; i++) {
      if (audio_timecode <= video_timecode) {
        block_queue.push(BlockInfo(kAudioTrackNum,
                                   audio_timecode,
                                   kWebMFlagKeyframe,
                                   0));
        audio_timecode += kAudioBlockDuration;
        continue;
      }

      block_queue.push(BlockInfo(kVideoTrackNum,
                                 video_timecode,
                                 video_flag,
                                 0));
      video_timecode += kVideoBlockDuration;
      video_flag = 0;
    }

    // Make the last 2 blocks BlockGroups so that they don't get delayed by the
    // block duration calculation logic.
    block_queue.push(BlockInfo(kAudioTrackNum,
                               audio_timecode,
                               kWebMFlagKeyframe,
                               kAudioBlockDuration));
    block_queue.push(BlockInfo(kVideoTrackNum,
                               video_timecode,
                               video_flag,
                               kVideoBlockDuration));

    return GenerateCluster(block_queue, unknown_size);
  }
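
  // Example: GenerateCluster(0, 4), used by kDefaultFirstCluster(), produces
  // audio blocks at 0 and 23 ms and video blocks at 0 and 33 ms, which is why
  // the default first cluster buffers "{ [0,46) }" and has an end timestamp
  // of 66 ms (kDefaultFirstClusterEndTimestamp).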

  scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
                                                  int end_timecode,
                                                  int track_number,
                                                  int block_duration) {
    CHECK_GT(end_timecode, timecode);

    std::vector<uint8> data(kBlockSize);

    ClusterBuilder cb;
    cb.SetClusterTimecode(timecode);

    // Create simple blocks for everything except the last block.
    while (timecode < (end_timecode - block_duration)) {
      cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
                        &data[0], data.size());
      timecode += block_duration;
    }

    if (track_number == kVideoTrackNum) {
      AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
                         kWebMFlagKeyframe);
    } else {
      cb.AddBlockGroup(track_number, timecode, block_duration,
                       kWebMFlagKeyframe, &data[0], data.size());
    }

    return cb.Finish();
  }

  void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
    demuxer_->GetStream(type)->Read(read_cb);
    message_loop_.RunUntilIdle();
  }

  void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
    Read(DemuxerStream::AUDIO, read_cb);
  }

  void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
    Read(DemuxerStream::VIDEO, read_cb);
  }

  void GenerateExpectedReads(int timecode, int block_count) {
    GenerateExpectedReads(timecode, timecode, block_count);
  }

  void GenerateExpectedReads(int start_audio_timecode,
                             int start_video_timecode,
                             int block_count) {
    CHECK_GT(block_count, 0);

    if (block_count == 1) {
      ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
      return;
    }

    int audio_timecode = start_audio_timecode;
    int video_timecode = start_video_timecode;

    for (int i = 0; i < block_count; i++) {
      if (audio_timecode <= video_timecode) {
        ExpectRead(DemuxerStream::AUDIO, audio_timecode);
        audio_timecode += kAudioBlockDuration;
        continue;
      }

      ExpectRead(DemuxerStream::VIDEO, video_timecode);
      video_timecode += kVideoBlockDuration;
    }
  }

  void GenerateSingleStreamExpectedReads(int timecode,
                                         int block_count,
                                         DemuxerStream::Type type,
                                         int block_duration) {
    CHECK_GT(block_count, 0);
    int stream_timecode = timecode;

    for (int i = 0; i < block_count; i++) {
      ExpectRead(type, stream_timecode);
      stream_timecode += block_duration;
    }
  }

  void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
    GenerateSingleStreamExpectedReads(
        timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
  }

  void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
    GenerateSingleStreamExpectedReads(
        timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
  }

  scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
    ClusterBuilder cb;
    cb.SetClusterTimecode(timecode);
    return cb.Finish();
  }

  void CheckExpectedRanges(const std::string& expected) {
    CheckExpectedRanges(kSourceId, expected);
  }

  void CheckExpectedRanges(const std::string& id,
                           const std::string& expected) {
    CheckExpectedRanges(demuxer_->GetBufferedRanges(id), expected);
  }

  void CheckExpectedRanges(DemuxerStream::Type type,
                           const std::string& expected) {
    ChunkDemuxerStream* stream =
        static_cast<ChunkDemuxerStream*>(demuxer_->GetStream(type));
    CheckExpectedRanges(stream->GetBufferedRanges(kDefaultDuration()),
                        expected);
  }

  void CheckExpectedRanges(const Ranges<base::TimeDelta>& r,
                           const std::string& expected) {
    std::stringstream ss;
    ss << "{ ";
    for (size_t i = 0; i < r.size(); ++i) {
      ss << "[" << r.start(i).InMilliseconds() << ","
         << r.end(i).InMilliseconds() << ") ";
    }
    ss << "}";
    EXPECT_EQ(expected, ss.str());
  }
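
  // The expected string uses the same format these helpers build: half-open
  // millisecond ranges inside braces, e.g. "{ [0,46) [100,200) }" for two
  // buffered ranges.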

  MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
                              const scoped_refptr<DecoderBuffer>&));

  void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
                            scoped_refptr<DecoderBuffer>* buffer_out,
                            DemuxerStream::Status status,
                            const scoped_refptr<DecoderBuffer>& buffer) {
    *status_out = status;
    *buffer_out = buffer;
  }

  void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
                                   DemuxerStream::Status* status,
                                   base::TimeDelta* last_timestamp) {
    DemuxerStream* stream = demuxer_->GetStream(type);
    scoped_refptr<DecoderBuffer> buffer;

    *last_timestamp = kNoTimestamp();
    do {
      stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
                              base::Unretained(this), status, &buffer));
      base::MessageLoop::current()->RunUntilIdle();
      if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
        *last_timestamp = buffer->timestamp();
    } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
  }

  void ExpectEndOfStream(DemuxerStream::Type type) {
    EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
    demuxer_->GetStream(type)->Read(base::Bind(
        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
    message_loop_.RunUntilIdle();
  }

  void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
    EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
                                HasTimestamp(timestamp_in_ms)));
    demuxer_->GetStream(type)->Read(base::Bind(
        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
    message_loop_.RunUntilIdle();
  }

  void ExpectConfigChanged(DemuxerStream::Type type) {
    EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
    demuxer_->GetStream(type)->Read(base::Bind(
        &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
    message_loop_.RunUntilIdle();
  }

  void CheckExpectedBuffers(DemuxerStream* stream,
                            const std::string& expected) {
    std::vector<std::string> timestamps = base::SplitString(
        expected, " ", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
    std::stringstream ss;
    for (size_t i = 0; i < timestamps.size(); ++i) {
      // Initialize status to kAborted since it's possible for Read() to return
      // without calling StoreStatusAndBuffer() if it doesn't have any buffers
      // left to return.
      DemuxerStream::Status status = DemuxerStream::kAborted;
      scoped_refptr<DecoderBuffer> buffer;
      stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
                              base::Unretained(this), &status, &buffer));
      base::MessageLoop::current()->RunUntilIdle();
      if (status != DemuxerStream::kOk || buffer->end_of_stream())
        break;

      if (i > 0)
        ss << " ";
      ss << buffer->timestamp().InMilliseconds();

      if (buffer->is_key_frame())
        ss << "K";

      // Handle preroll buffers.
      if (base::EndsWith(timestamps[i], "P", base::CompareCase::SENSITIVE)) {
        ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
        ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
        ss << "P";
      }
    }
    EXPECT_EQ(expected, ss.str());
  }
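
  // |expected| uses the block description syntax: space-delimited timestamps
  // in milliseconds, with "K" appended for key frames and "P" for buffers
  // carrying preroll discard padding, e.g. "0K 23K 46K".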

  MOCK_METHOD1(Checkpoint, void(int id));

  struct BufferTimestamps {
    int video_time_ms;
    int audio_time_ms;
  };
  static const int kSkip = -1;

  // Test parsing a WebM file.
  // |filename| - The name of the file in media/test/data to parse.
  // |timestamps| - The expected timestamps on the parsed buffers.
  //    a timestamp of kSkip indicates that a Read() call for that stream
  //    shouldn't be made on that iteration of the loop. If both streams have
  //    a kSkip then the loop will terminate.
  bool ParseWebMFile(const std::string& filename,
                     const BufferTimestamps* timestamps,
                     const base::TimeDelta& duration) {
    return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
  }

  bool ParseWebMFile(const std::string& filename,
                     const BufferTimestamps* timestamps,
                     const base::TimeDelta& duration,
                     int stream_flags) {
    EXPECT_CALL(*this, DemuxerOpened());
    demuxer_->Initialize(
        &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);

    if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
      return false;

    // Read a WebM file into memory and send the data to the demuxer.
    scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
    EXPECT_CALL(*this, InitSegmentReceived());
    AppendDataInPieces(buffer->data(), buffer->data_size(), 512);

    // Verify that the timestamps on the first few packets match what we
    // expect.
    for (size_t i = 0;
         (timestamps[i].audio_time_ms != kSkip ||
          timestamps[i].video_time_ms != kSkip);
         i++) {
      bool audio_read_done = false;
      bool video_read_done = false;

      if (timestamps[i].audio_time_ms != kSkip) {
        ReadAudio(base::Bind(&OnReadDone,
                             base::TimeDelta::FromMilliseconds(
                                 timestamps[i].audio_time_ms),
                             &audio_read_done));
        EXPECT_TRUE(audio_read_done);
      }

      if (timestamps[i].video_time_ms != kSkip) {
        ReadVideo(base::Bind(&OnReadDone,
                             base::TimeDelta::FromMilliseconds(
                                 timestamps[i].video_time_ms),
                             &video_read_done));
        EXPECT_TRUE(video_read_done);
      }
    }

    return true;
  }

  MOCK_METHOD0(DemuxerOpened, void());
  MOCK_METHOD2(OnEncryptedMediaInitData,
               void(EmeInitDataType init_data_type,
                    const std::vector<uint8>& init_data));

  MOCK_METHOD0(InitSegmentReceived, void(void));

  void Seek(base::TimeDelta seek_time) {
    demuxer_->StartWaitingForSeek(seek_time);
    demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
    message_loop_.RunUntilIdle();
  }

  void MarkEndOfStream(PipelineStatus status) {
    demuxer_->MarkEndOfStream(status);
    message_loop_.RunUntilIdle();
  }

  bool SetTimestampOffset(const std::string& id,
                          base::TimeDelta timestamp_offset) {
    if (demuxer_->IsParsingMediaSegment(id))
      return false;

    timestamp_offset_map_[id] = timestamp_offset;
    return true;
  }

  base::MessageLoop message_loop_;
  MockDemuxerHost host_;

  scoped_ptr<ChunkDemuxer> demuxer_;
  ChunkDemuxer::InitSegmentReceivedCB init_segment_received_cb_;

  base::TimeDelta append_window_start_for_next_append_;
  base::TimeDelta append_window_end_for_next_append_;

  // Map of source id to timestamp offset to use for the next AppendData()
  // operation for that source id.
  std::map<std::string, base::TimeDelta> timestamp_offset_map_;

 private:
  DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
};

TEST_F(ChunkDemuxerTest, Init) {
  // Test no streams, audio-only, video-only, and audio & video scenarios.
  // Audio and video streams can be encrypted or not encrypted.
  for (int i = 0; i < 16; i++) {
    bool has_audio = (i & 0x1) != 0;
    bool has_video = (i & 0x2) != 0;
    bool is_audio_encrypted = (i & 0x4) != 0;
    bool is_video_encrypted = (i & 0x8) != 0;

    // No test on invalid combination.
    if ((!has_audio && is_audio_encrypted) ||
        (!has_video && is_video_encrypted)) {
      continue;
    }

    CreateNewDemuxer();

    if (is_audio_encrypted || is_video_encrypted) {
      int need_key_count = (is_audio_encrypted ? 1 : 0) +
                           (is_video_encrypted ? 1 : 0);
      EXPECT_CALL(*this, OnEncryptedMediaInitData(
                             EmeInitDataType::WEBM,
                             std::vector<uint8>(
                                 kEncryptedMediaInitData,
                                 kEncryptedMediaInitData +
                                     arraysize(kEncryptedMediaInitData))))
          .Times(Exactly(need_key_count));
    }

    int stream_flags = 0;
    if (has_audio)
      stream_flags |= HAS_AUDIO;

    if (has_video)
      stream_flags |= HAS_VIDEO;

    ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
        stream_flags, is_audio_encrypted, is_video_encrypted));

    DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
    if (has_audio) {
      ASSERT_TRUE(audio_stream);

      const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
      EXPECT_EQ(kCodecVorbis, config.codec());
      EXPECT_EQ(32, config.bits_per_channel());
      EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
      EXPECT_EQ(44100, config.samples_per_second());
      EXPECT_TRUE(config.extra_data());
      EXPECT_GT(config.extra_data_size(), 0u);
      EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
      EXPECT_EQ(is_audio_encrypted,
                audio_stream->audio_decoder_config().is_encrypted());
      EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
                      ->supports_partial_append_window_trimming());
    } else {
      EXPECT_FALSE(audio_stream);
    }

    DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
    if (has_video) {
      EXPECT_TRUE(video_stream);
      EXPECT_EQ(is_video_encrypted,
                video_stream->video_decoder_config().is_encrypted());
      EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
                       ->supports_partial_append_window_trimming());
    } else {
      EXPECT_FALSE(video_stream);
    }

    ShutdownDemuxer();
    demuxer_.reset();
  }
}

// TODO(acolwell): Fold this test into Init tests since the tests are
// almost identical.
TEST_F(ChunkDemuxerTest, InitText) {
  // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
  // No encryption cases handled here.
  bool has_video = true;
  bool is_audio_encrypted = false;
  bool is_video_encrypted = false;
  for (int i = 0; i < 2; i++) {
    bool has_audio = (i & 0x1) != 0;

    CreateNewDemuxer();

    DemuxerStream* text_stream = NULL;
    TextTrackConfig text_config;
    EXPECT_CALL(host_, AddTextStream(_, _))
        .WillOnce(DoAll(SaveArg<0>(&text_stream),
                        SaveArg<1>(&text_config)));

    int stream_flags = HAS_TEXT;
    if (has_audio)
      stream_flags |= HAS_AUDIO;

    if (has_video)
      stream_flags |= HAS_VIDEO;

    ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
        stream_flags, is_audio_encrypted, is_video_encrypted));
    ASSERT_TRUE(text_stream);
    EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
    EXPECT_EQ(kTextSubtitles, text_config.kind());
    EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
                     ->supports_partial_append_window_trimming());

    DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
    if (has_audio) {
      ASSERT_TRUE(audio_stream);

      const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
      EXPECT_EQ(kCodecVorbis, config.codec());
      EXPECT_EQ(32, config.bits_per_channel());
      EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
      EXPECT_EQ(44100, config.samples_per_second());
      EXPECT_TRUE(config.extra_data());
      EXPECT_GT(config.extra_data_size(), 0u);
      EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
      EXPECT_EQ(is_audio_encrypted,
                audio_stream->audio_decoder_config().is_encrypted());
      EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
                      ->supports_partial_append_window_trimming());
    } else {
      EXPECT_FALSE(audio_stream);
    }

    DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
    if (has_video) {
      EXPECT_TRUE(video_stream);
      EXPECT_EQ(is_video_encrypted,
                video_stream->video_decoder_config().is_encrypted());
      EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
                       ->supports_partial_append_window_trimming());
    } else {
      EXPECT_FALSE(video_stream);
    }

    ShutdownDemuxer();
    demuxer_.reset();
  }
}

TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
  // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
  // segment in which the text track ID changes. Verify appended buffers before
  // and after the second init segment map to the same underlying track
  // buffers.
  CreateNewDemuxer();
  DemuxerStream* text_stream = NULL;
  TextTrackConfig text_config;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(DoAll(SaveArg<0>(&text_stream),
                      SaveArg<1>(&text_config)));
  ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
      HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
  ASSERT_TRUE(audio_stream);
  ASSERT_TRUE(video_stream);
  ASSERT_TRUE(text_stream);

  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
      MuxedStreamInfo(kVideoTrackNum, "0K 30"),
      MuxedStreamInfo(kTextTrackNum, "10K"));
  CheckExpectedRanges(kSourceId, "{ [0,46) }");

  scoped_ptr<uint8[]> info_tracks;
  int info_tracks_size = 0;
  CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
                                             false, false,
                                             &info_tracks, &info_tracks_size);
  EXPECT_CALL(*this, InitSegmentReceived());
  demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
                       append_window_start_for_next_append_,
                       append_window_end_for_next_append_,
                       &timestamp_offset_map_[kSourceId],
                       init_segment_received_cb_);

  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
      MuxedStreamInfo(kVideoTrackNum, "60K"),
      MuxedStreamInfo(kAlternateTextTrackNum, "45K"));

  CheckExpectedRanges(kSourceId, "{ [0,92) }");
  CheckExpectedBuffers(audio_stream, "0K 23K 46K 69K");
  CheckExpectedBuffers(video_stream, "0K 30 60K");
  CheckExpectedBuffers(text_stream, "10K 45K");

  ShutdownDemuxer();
}

TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
  // Tests that non-key-frames following an init segment are allowed
  // and dropped, as expected if the initialization segment received
  // algorithm correctly sets the needs random access point flag to true for
  // all track buffers. Note that the first initialization segment is
  // insufficient to fully test this since needs random access point flag
  // initializes to true.
  CreateNewDemuxer();
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
      HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
  ASSERT_TRUE(audio_stream && video_stream && text_stream);

  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "23K"),
      MuxedStreamInfo(kVideoTrackNum, "0 30K"),
      MuxedStreamInfo(kTextTrackNum, "25K 40K"));
  CheckExpectedRanges(kSourceId, "{ [23,46) }");

  EXPECT_CALL(*this, InitSegmentReceived());
  AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
      MuxedStreamInfo(kVideoTrackNum, "60 90K"),
      MuxedStreamInfo(kTextTrackNum, "80K 90K"));
  CheckExpectedRanges(kSourceId, "{ [23,92) }");

  CheckExpectedBuffers(audio_stream, "23K 46K 69K");
  CheckExpectedBuffers(video_stream, "30K 90K");
  CheckExpectedBuffers(text_stream, "25K 40K 80K 90K");
}

// Make sure that the demuxer reports an error if Shutdown()
// is called before all the initialization segments are appended.
TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_, CreateInitDoneCB(
          kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);

  EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
  EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);

  EXPECT_CALL(*this, InitSegmentReceived());
  AppendInitSegmentWithSourceId("audio", HAS_AUDIO);

  ShutdownDemuxer();
}

TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_, CreateInitDoneCB(
          kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);

  EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
  EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);

  EXPECT_CALL(host_, AddTextStream(_, _))
      .Times(Exactly(1));

  EXPECT_CALL(*this, InitSegmentReceived());
  AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);

  ShutdownDemuxer();
}

// Verifies that all streams waiting for data receive an end of stream
// buffer when Shutdown() is called.
TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  bool audio_read_done = false;
  bool video_read_done = false;
  bool text_read_done = false;
  audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
  video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
  text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
  message_loop_.RunUntilIdle();

  EXPECT_FALSE(audio_read_done);
  EXPECT_FALSE(video_read_done);
  EXPECT_FALSE(text_read_done);

  ShutdownDemuxer();

  EXPECT_TRUE(audio_read_done);
  EXPECT_TRUE(video_read_done);
  EXPECT_TRUE(text_read_done);
}

// Test that Seek() completes successfully when the first cluster
// arrives.
TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  AppendCluster(kDefaultFirstCluster());

  InSequence s;

  EXPECT_CALL(*this, Checkpoint(1));

  Seek(base::TimeDelta::FromMilliseconds(46));

  EXPECT_CALL(*this, Checkpoint(2));

  Checkpoint(1);

  AppendCluster(kDefaultSecondCluster());

  message_loop_.RunUntilIdle();

  Checkpoint(2);
}

// Test that parsing errors are handled for clusters appended after init.
TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  AppendCluster(kDefaultFirstCluster());

  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendGarbage();
}

// Test the case where a Seek() is requested while the parser
// is in the middle of a cluster. This is to verify that the parser
// does not reset itself on a seek.
TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  InSequence s;

  scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));

  // Split the cluster into two appends at an arbitrary point near the end.
  int first_append_size = cluster_a->size() - 11;
  int second_append_size = cluster_a->size() - first_append_size;

  // Append the first part of the cluster.
  AppendData(cluster_a->data(), first_append_size);

  ExpectRead(DemuxerStream::AUDIO, 0);
  ExpectRead(DemuxerStream::VIDEO, 0);
  ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);

  Seek(base::TimeDelta::FromSeconds(5));

  // Append the rest of the cluster.
  AppendData(cluster_a->data() + first_append_size, second_append_size);

  // Append the new cluster and verify that only the blocks
  // in the new cluster are returned.
  AppendCluster(GenerateCluster(5000, 6));
  GenerateExpectedReads(5000, 6);
}

// Test the case where AppendData() is called before Init().
TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
  scoped_ptr<uint8[]> info_tracks;
  int info_tracks_size = 0;
  CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
                    false, false, &info_tracks, &info_tracks_size);
  demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
                       append_window_start_for_next_append_,
                       append_window_end_for_next_append_,
                       &timestamp_offset_map_[kSourceId],
                       init_segment_received_cb_);
}

// Make sure Read() callbacks are dispatched with the proper data.
TEST_F(ChunkDemuxerTest, Read) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(kDefaultFirstCluster());

  bool audio_read_done = false;
  bool video_read_done = false;
  ReadAudio(base::Bind(&OnReadDone,
                       base::TimeDelta::FromMilliseconds(0),
                       &audio_read_done));
  ReadVideo(base::Bind(&OnReadDone,
                       base::TimeDelta::FromMilliseconds(0),
                       &video_read_done));

  EXPECT_TRUE(audio_read_done);
  EXPECT_TRUE(video_read_done);
}

TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  AppendCluster(kDefaultFirstCluster());
  AppendCluster(GenerateCluster(10, 4));

  // Make sure that AppendCluster() does not fail with a cluster that
  // overlaps with the previously appended cluster.
  AppendCluster(GenerateCluster(5, 4));

  // Verify that AppendData() can still accept more data.
  scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
  demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
                       append_window_start_for_next_append_,
                       append_window_end_for_next_append_,
                       &timestamp_offset_map_[kSourceId],
                       init_segment_received_cb_);
}

TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  AppendCluster(kDefaultFirstCluster());

  ClusterBuilder cb;

  // Test the case where block timecodes are not monotonically
  // increasing but stay above the cluster timecode.
  cb.SetClusterTimecode(5);
  AddSimpleBlock(&cb, kAudioTrackNum, 5);
  AddSimpleBlock(&cb, kVideoTrackNum, 10);
  AddSimpleBlock(&cb, kAudioTrackNum, 7);
  AddSimpleBlock(&cb, kVideoTrackNum, 15);

  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendCluster(cb.Finish());

  // Verify that AppendData() ignores data after the error.
  scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
  demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
                       append_window_start_for_next_append_,
                       append_window_end_for_next_append_,
                       &timestamp_offset_map_[kSourceId],
                       init_segment_received_cb_);
}

TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  AppendCluster(kDefaultFirstCluster());

  ClusterBuilder cb;

  // Test timecodes going backwards and including values less than the cluster
  // timecode.
  cb.SetClusterTimecode(5);
  AddSimpleBlock(&cb, kAudioTrackNum, 5);
  AddSimpleBlock(&cb, kVideoTrackNum, 5);
  AddSimpleBlock(&cb, kAudioTrackNum, 3);
  AddSimpleBlock(&cb, kVideoTrackNum, 3);

  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendCluster(cb.Finish());

  // Verify that AppendData() ignores data after the error.
  scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
  demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
                       append_window_start_for_next_append_,
                       append_window_end_for_next_append_,
                       &timestamp_offset_map_[kSourceId],
                       init_segment_received_cb_);
}

TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  AppendCluster(kDefaultFirstCluster());

  ClusterBuilder cb;

  // Test monotonically increasing timestamps on a per stream
  // basis.
  cb.SetClusterTimecode(5);
  AddSimpleBlock(&cb, kAudioTrackNum, 5);
  AddSimpleBlock(&cb, kVideoTrackNum, 5);
  AddSimpleBlock(&cb, kAudioTrackNum, 4);
  AddSimpleBlock(&cb, kVideoTrackNum, 7);

  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendCluster(cb.Finish());
}

// Test the case where a cluster is passed to AppendCluster() before
// INFO & TRACKS data.
TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_, NewExpectedStatusCB(PIPELINE_ERROR_DECODE), true);

  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);

  AppendCluster(GenerateCluster(0, 1));
}

// Test cases where we get a MarkEndOfStream() call during initialization.
TEST_F(ChunkDemuxerTest, EOSDuringInit) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
  MarkEndOfStream(PIPELINE_OK);
}

TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);

  ASSERT_EQ(AddId(), ChunkDemuxer::kOk);

  CheckExpectedRanges("{ }");
  MarkEndOfStream(PIPELINE_OK);
  ShutdownDemuxer();
  CheckExpectedRanges("{ }");
  demuxer_->RemoveId(kSourceId);
  demuxer_.reset();
}

TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  CheckExpectedRanges("{ }");
  MarkEndOfStream(PIPELINE_OK);
  CheckExpectedRanges("{ }");
}

TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(kDefaultFirstCluster());
  CheckExpectedRanges(kDefaultFirstClusterRange);

  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  MarkEndOfStream(PIPELINE_ERROR_DECODE);
  CheckExpectedRanges(kDefaultFirstClusterRange);
}

TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(kDefaultFirstCluster());
  CheckExpectedRanges(kDefaultFirstClusterRange);

  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
  MarkEndOfStream(PIPELINE_ERROR_NETWORK);
}
1746 // Helper class to reduce duplicate code when testing end of stream
1747 // Read() behavior.
1748 class EndOfStreamHelper {
1749 public:
1750 explicit EndOfStreamHelper(Demuxer* demuxer)
1751 : demuxer_(demuxer),
1752 audio_read_done_(false),
1753 video_read_done_(false) {
1756 // Request a read on the audio and video streams.
1757 void RequestReads() {
1758 EXPECT_FALSE(audio_read_done_);
1759 EXPECT_FALSE(video_read_done_);
1761 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1762 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1764 audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1765 video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1766 base::MessageLoop::current()->RunUntilIdle();
1769 // Check to see if |audio_read_done_| and |video_read_done_| variables
1770 // match |expected|.
1771 void CheckIfReadDonesWereCalled(bool expected) {
1772 base::MessageLoop::current()->RunUntilIdle();
1773 EXPECT_EQ(expected, audio_read_done_);
1774 EXPECT_EQ(expected, video_read_done_);
1777 private:
1778 static void OnEndOfStreamReadDone(
1779 bool* called,
1780 DemuxerStream::Status status,
1781 const scoped_refptr<DecoderBuffer>& buffer) {
1782 EXPECT_EQ(status, DemuxerStream::kOk);
1783 EXPECT_TRUE(buffer->end_of_stream());
1784 *called = true;
1787 Demuxer* demuxer_;
1788 bool audio_read_done_;
1789 bool video_read_done_;
1791 DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
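// Typical use, as in the tests below: queue pending reads with
// RequestReads(), call MarkEndOfStream(PIPELINE_OK) on the fixture, then
// verify with CheckIfReadDonesWereCalled(true) that both reads completed
// with "end of stream" buffers.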
1794 // Make sure that all pending reads that we don't have media data for get an
1795 // "end of stream" buffer when MarkEndOfStream() is called.
1796 TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1797 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1799 AppendCluster(GenerateCluster(0, 2));
1801 bool audio_read_done_1 = false;
1802 bool video_read_done_1 = false;
1803 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1804 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1806 ReadAudio(base::Bind(&OnReadDone,
1807 base::TimeDelta::FromMilliseconds(0),
1808 &audio_read_done_1));
1809 ReadVideo(base::Bind(&OnReadDone,
1810 base::TimeDelta::FromMilliseconds(0),
1811 &video_read_done_1));
1812 message_loop_.RunUntilIdle();
1814 EXPECT_TRUE(audio_read_done_1);
1815 EXPECT_TRUE(video_read_done_1);
1817 end_of_stream_helper_1.RequestReads();
1819 EXPECT_CALL(host_, SetDuration(
1820 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1821 MarkEndOfStream(PIPELINE_OK);
1823 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1825 end_of_stream_helper_2.RequestReads();
1826 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1829 // Make sure that all Read() calls after we get a MarkEndOfStream()
1830 // call return an "end of stream" buffer.
1831 TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1832 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1834 AppendCluster(GenerateCluster(0, 2));
1836 bool audio_read_done_1 = false;
1837 bool video_read_done_1 = false;
1838 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1839 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1840 EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1842 ReadAudio(base::Bind(&OnReadDone,
1843 base::TimeDelta::FromMilliseconds(0),
1844 &audio_read_done_1));
1845 ReadVideo(base::Bind(&OnReadDone,
1846 base::TimeDelta::FromMilliseconds(0),
1847 &video_read_done_1));
1849 end_of_stream_helper_1.RequestReads();
1851 EXPECT_TRUE(audio_read_done_1);
1852 EXPECT_TRUE(video_read_done_1);
1853 end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1855 EXPECT_CALL(host_, SetDuration(
1856 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1857 MarkEndOfStream(PIPELINE_OK);
1859 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1861 // Request a few more reads and make sure we immediately get
1862 // end of stream buffers.
1863 end_of_stream_helper_2.RequestReads();
1864 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1866 end_of_stream_helper_3.RequestReads();
1867 end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1870 TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1871 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1873 AppendCluster(0, 10);
1874 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1875 MarkEndOfStream(PIPELINE_OK);
1877 // Start the first seek.
1878 Seek(base::TimeDelta::FromMilliseconds(20));
1880 // Simulate another seek being requested before the first
1881 // seek has finished prerolling.
1882 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1883 demuxer_->CancelPendingSeek(seek_time2);
1885 // Finish second seek.
1886 Seek(seek_time2);
1888 DemuxerStream::Status status;
1889 base::TimeDelta last_timestamp;
1891 // Make sure audio can reach end of stream.
1892 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1893 ASSERT_EQ(status, DemuxerStream::kOk);
1895 // Make sure video can reach end of stream.
1896 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1897 ASSERT_EQ(status, DemuxerStream::kOk);
1900 // Verify buffered range change behavior for audio/video/text tracks.
1901 TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1902 DemuxerStream* text_stream = NULL;
1904 EXPECT_CALL(host_, AddTextStream(_, _))
1905 .WillOnce(SaveArg<0>(&text_stream));
1906 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1908 AppendMuxedCluster(
1909 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1910 MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
1912 // Check expected ranges and verify that an empty text track does not
1913 // affect the expected ranges.
1914 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1916 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1917 MarkEndOfStream(PIPELINE_OK);
1919 // After marking end of stream, the range extends to the end of the video
1920 // data; the empty text track still does not affect the expected ranges.
1921 CheckExpectedRanges(kSourceId, "{ [0,66) }");
1923 // Unmark end of stream state and verify that the ranges return to
1924 // their pre-"end of stream" values.
1925 demuxer_->UnmarkEndOfStream();
1926 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1928 // Add text track data and verify that the buffered ranges don't change
1929 // since the intersection of all the tracks doesn't change.
1930 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1931 AppendMuxedCluster(
1932 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1933 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1934 MuxedStreamInfo(kTextTrackNum, "0K 100K"));
1935 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1937 // Mark end of stream and verify that text track data is reflected in
1938 // the new range.
1939 MarkEndOfStream(PIPELINE_OK);
1940 CheckExpectedRanges(kSourceId, "{ [0,200) }");
1943 // Make sure AppendData() will accept elements that span multiple calls.
1944 TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1945 EXPECT_CALL(*this, DemuxerOpened());
1946 demuxer_->Initialize(
1947 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1949 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1951 scoped_ptr<uint8[]> info_tracks;
1952 int info_tracks_size = 0;
1953 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1954 false, false, &info_tracks, &info_tracks_size);
1956 scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1957 scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1959 size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1960 scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1961 uint8* dst = buffer.get();
1962 memcpy(dst, info_tracks.get(), info_tracks_size);
1963 dst += info_tracks_size;
1965 memcpy(dst, cluster_a->data(), cluster_a->size());
1966 dst += cluster_a->size();
1968 memcpy(dst, cluster_b->data(), cluster_b->size());
1969 dst += cluster_b->size();
1971 EXPECT_CALL(*this, InitSegmentReceived());
1972 AppendDataInPieces(buffer.get(), buffer_size);
1974 GenerateExpectedReads(0, 9);
1977 TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1978 struct BufferTimestamps buffer_timestamps[] = {
1979 {0, 0},
1980 {33, 3},
1981 {67, 6},
1982 {100, 9},
1983 {133, 12},
1984 {kSkip, kSkip},
1987 // Expect duration adjustment since actual duration differs slightly from
1988 // duration in the init segment.
1989 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1991 ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1992 base::TimeDelta::FromMilliseconds(2744)));
1995 TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1996 struct BufferTimestamps buffer_timestamps[] = {
1997 {0, 0},
1998 {33, 3},
1999 {67, 6},
2000 {100, 9},
2001 {133, 12},
2002 {kSkip, kSkip},
2005 ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
2006 kInfiniteDuration()));
2008 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2009 EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, audio->liveness());
2010 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2011 EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, video->liveness());
2014 TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
2015 struct BufferTimestamps buffer_timestamps[] = {
2016 {kSkip, 0},
2017 {kSkip, 3},
2018 {kSkip, 6},
2019 {kSkip, 9},
2020 {kSkip, 12},
2021 {kSkip, kSkip},
2024 // Expect duration adjustment since actual duration differs slightly from
2025 // duration in the init segment.
2026 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
2028 ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
2029 base::TimeDelta::FromMilliseconds(2744),
2030 HAS_AUDIO));
2033 TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
2034 struct BufferTimestamps buffer_timestamps[] = {
2035 {0, kSkip},
2036 {33, kSkip},
2037 {67, kSkip},
2038 {100, kSkip},
2039 {133, kSkip},
2040 {kSkip, kSkip},
2043 // Expect duration adjustment since actual duration differs slightly from
2044 // duration in the init segment.
2045 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
2047 ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
2048 base::TimeDelta::FromMilliseconds(2703),
2049 HAS_VIDEO));
2052 TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
2053 struct BufferTimestamps buffer_timestamps[] = {
2054 {0, 0},
2055 {33, 3},
2056 {33, 6},
2057 {67, 9},
2058 {100, 12},
2059 {kSkip, kSkip},
2062 ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
2063 base::TimeDelta::FromMilliseconds(2767)));
2066 // Verify that we output buffers before the entire cluster has been parsed.
2067 TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
2068 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2069 AppendEmptyCluster(0);
2071 scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
2073 bool audio_read_done = false;
2074 bool video_read_done = false;
2075 ReadAudio(base::Bind(&OnReadDone,
2076 base::TimeDelta::FromMilliseconds(0),
2077 &audio_read_done));
2078 ReadVideo(base::Bind(&OnReadDone,
2079 base::TimeDelta::FromMilliseconds(0),
2080 &video_read_done));
2082 // Make sure the reads haven't completed yet.
2083 EXPECT_FALSE(audio_read_done);
2084 EXPECT_FALSE(video_read_done);
2086 // Append data one byte at a time until one or both reads complete.
2087 int i = 0;
2088 for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
2089 AppendData(cluster->data() + i, 1);
2090 message_loop_.RunUntilIdle();
2093 EXPECT_TRUE(audio_read_done || video_read_done);
2094 EXPECT_GT(i, 0);
2095 EXPECT_LT(i, cluster->size());
2097 audio_read_done = false;
2098 video_read_done = false;
2099 ReadAudio(base::Bind(&OnReadDone,
2100 base::TimeDelta::FromMilliseconds(23),
2101 &audio_read_done));
2102 ReadVideo(base::Bind(&OnReadDone,
2103 base::TimeDelta::FromMilliseconds(33),
2104 &video_read_done));
2106 // Make sure the reads haven't completed yet.
2107 EXPECT_FALSE(audio_read_done);
2108 EXPECT_FALSE(video_read_done);
2110 // Append the remaining data.
2111 ASSERT_LT(i, cluster->size());
2112 AppendData(cluster->data() + i, cluster->size() - i);
2114 message_loop_.RunUntilIdle();
2116 EXPECT_TRUE(audio_read_done);
2117 EXPECT_TRUE(video_read_done);
2120 TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
2121 EXPECT_CALL(*this, DemuxerOpened());
2122 demuxer_->Initialize(
2123 &host_, CreateInitDoneCB(
2124 kNoTimestamp(), PIPELINE_ERROR_DECODE), true);
2126 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
2128 uint8 tmp = 0;
2129 demuxer_->AppendData(kSourceId, &tmp, 1,
2130 append_window_start_for_next_append_,
2131 append_window_end_for_next_append_,
2132 &timestamp_offset_map_[kSourceId],
2133 init_segment_received_cb_);
2136 TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
2137 EXPECT_CALL(*this, DemuxerOpened());
2138 demuxer_->Initialize(
2139 &host_, CreateInitDoneCB(kNoTimestamp(),
2140 PIPELINE_ERROR_DECODE), true);
2142 std::vector<std::string> codecs(1);
2143 codecs[0] = "vorbis";
2144 ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
2145 ChunkDemuxer::kOk);
2147 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2150 TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
2151 EXPECT_CALL(*this, DemuxerOpened());
2152 demuxer_->Initialize(
2153 &host_, CreateInitDoneCB(kNoTimestamp(),
2154 PIPELINE_ERROR_DECODE), true);
2156 std::vector<std::string> codecs(1);
2157 codecs[0] = "vp8";
2158 ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
2159 ChunkDemuxer::kOk);
2161 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2164 TEST_F(ChunkDemuxerTest, MultipleHeaders) {
2165 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2167 AppendCluster(kDefaultFirstCluster());
2169 // Append another identical initialization segment.
2170 EXPECT_CALL(*this, InitSegmentReceived());
2171 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2173 AppendCluster(kDefaultSecondCluster());
2175 GenerateExpectedReads(0, 9);
2178 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
2179 std::string audio_id = "audio1";
2180 std::string video_id = "video1";
2181 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2183 // Append audio and video data into separate source ids.
2184 AppendCluster(audio_id,
2185 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2186 GenerateAudioStreamExpectedReads(0, 4);
2187 AppendCluster(video_id,
2188 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2189 GenerateVideoStreamExpectedReads(0, 4);
2192 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2193 // TODO(matthewjheaney): Here and elsewhere, we need more tests
2194 // for inband text tracks (http://crbug.com/321455).
2196 std::string audio_id = "audio1";
2197 std::string video_id = "video1";
2199 EXPECT_CALL(host_, AddTextStream(_, _))
2200 .Times(Exactly(2));
2201 ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2203 // Append audio and video data into separate source ids.
2204 AppendCluster(audio_id,
2205 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2206 GenerateAudioStreamExpectedReads(0, 4);
2207 AppendCluster(video_id,
2208 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2209 GenerateVideoStreamExpectedReads(0, 4);
2212 TEST_F(ChunkDemuxerTest, AddIdFailures) {
2213 EXPECT_CALL(*this, DemuxerOpened());
2214 demuxer_->Initialize(
2215 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2217 std::string audio_id = "audio1";
2218 std::string video_id = "video1";
2220 ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2222 // Adding an id with audio/video should fail because we already added audio.
2223 ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2225 EXPECT_CALL(*this, InitSegmentReceived());
2226 AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2228 // Adding an id after append should fail.
2229 ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2232 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2233 TEST_F(ChunkDemuxerTest, RemoveId) {
2234 std::string audio_id = "audio1";
2235 std::string video_id = "video1";
2236 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2238 // Append audio and video data into separate source ids.
2239 AppendCluster(audio_id,
2240 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2241 AppendCluster(video_id,
2242 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2244 // Read() from audio should return normal buffers.
2245 GenerateAudioStreamExpectedReads(0, 4);
2247 // Remove the audio id.
2248 demuxer_->RemoveId(audio_id);
2250 // Read() from audio should return "end of stream" buffers.
2251 bool audio_read_done = false;
2252 ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2253 message_loop_.RunUntilIdle();
2254 EXPECT_TRUE(audio_read_done);
2256 // Read() from video should still return normal buffers.
2257 GenerateVideoStreamExpectedReads(0, 4);
2260 // Test that removing an ID immediately after adding it does not interfere with
2261 // quota for new IDs in the future.
2262 TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
2263 std::string audio_id_1 = "audio1";
2264 ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
2265 demuxer_->RemoveId(audio_id_1);
2267 std::string audio_id_2 = "audio2";
2268 ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
2271 TEST_F(ChunkDemuxerTest, SeekCanceled) {
2272 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2274 // Append cluster at the beginning of the stream.
2275 AppendCluster(GenerateCluster(0, 4));
2277 // Seek to an unbuffered region.
2278 Seek(base::TimeDelta::FromSeconds(50));
2280 // Attempt to read in unbuffered area; should not fulfill the read.
2281 bool audio_read_done = false;
2282 bool video_read_done = false;
2283 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2284 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2285 EXPECT_FALSE(audio_read_done);
2286 EXPECT_FALSE(video_read_done);
2288 // Now cancel the pending seek, which should flush the reads with empty
2289 // buffers.
2290 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2291 demuxer_->CancelPendingSeek(seek_time);
2292 message_loop_.RunUntilIdle();
2293 EXPECT_TRUE(audio_read_done);
2294 EXPECT_TRUE(video_read_done);
2296 // A seek back to the buffered region should succeed.
2297 Seek(seek_time);
2298 GenerateExpectedReads(0, 4);
2301 TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2302 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2304 // Append cluster at the beginning of the stream.
2305 AppendCluster(GenerateCluster(0, 4));
2307 // Start waiting for a seek.
2308 base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2309 base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2310 demuxer_->StartWaitingForSeek(seek_time1);
2312 // Now cancel the upcoming seek to an unbuffered region.
2313 demuxer_->CancelPendingSeek(seek_time2);
2314 demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2316 // Read requests should be fulfilled with empty buffers.
2317 bool audio_read_done = false;
2318 bool video_read_done = false;
2319 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2320 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2321 EXPECT_TRUE(audio_read_done);
2322 EXPECT_TRUE(video_read_done);
2324 // A seek back to the buffered region should succeed.
2325 Seek(seek_time2);
2326 GenerateExpectedReads(0, 4);
2329 // Test that Seek() successfully seeks to all source IDs.
2330 TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2331 std::string audio_id = "audio1";
2332 std::string video_id = "video1";
2333 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2335 AppendCluster(
2336 audio_id,
2337 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2338 AppendCluster(
2339 video_id,
2340 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2342 // Read() should return buffers at 0.
2343 bool audio_read_done = false;
2344 bool video_read_done = false;
2345 ReadAudio(base::Bind(&OnReadDone,
2346 base::TimeDelta::FromMilliseconds(0),
2347 &audio_read_done));
2348 ReadVideo(base::Bind(&OnReadDone,
2349 base::TimeDelta::FromMilliseconds(0),
2350 &video_read_done));
2351 EXPECT_TRUE(audio_read_done);
2352 EXPECT_TRUE(video_read_done);
2354 // Seek to 3 (an unbuffered region).
2355 Seek(base::TimeDelta::FromSeconds(3));
2357 audio_read_done = false;
2358 video_read_done = false;
2359 ReadAudio(base::Bind(&OnReadDone,
2360 base::TimeDelta::FromSeconds(3),
2361 &audio_read_done));
2362 ReadVideo(base::Bind(&OnReadDone,
2363 base::TimeDelta::FromSeconds(3),
2364 &video_read_done));
2365 // Read()s should not return until after data is appended at the Seek point.
2366 EXPECT_FALSE(audio_read_done);
2367 EXPECT_FALSE(video_read_done);
2369 AppendCluster(audio_id,
2370 GenerateSingleStreamCluster(
2371 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2372 AppendCluster(video_id,
2373 GenerateSingleStreamCluster(
2374 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2376 message_loop_.RunUntilIdle();
2378 // Read() should return buffers at 3.
2379 EXPECT_TRUE(audio_read_done);
2380 EXPECT_TRUE(video_read_done);
2383 // Test that Seek() completes successfully when EndOfStream
2384 // is called before data is available for that seek point.
2385 // This scenario can occur when seeking past the end of stream
2386 // of either audio or video (or both).
2387 TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2388 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2390 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2391 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2393 // Seeking past the end of video.
2394 // Note: audio data is available for that seek point.
2395 bool seek_cb_was_called = false;
2396 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2397 demuxer_->StartWaitingForSeek(seek_time);
2398 demuxer_->Seek(seek_time,
2399 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2400 message_loop_.RunUntilIdle();
2402 EXPECT_FALSE(seek_cb_was_called);
2404 EXPECT_CALL(host_, SetDuration(
2405 base::TimeDelta::FromMilliseconds(120)));
2406 MarkEndOfStream(PIPELINE_OK);
2407 message_loop_.RunUntilIdle();
2409 EXPECT_TRUE(seek_cb_was_called);
2411 ShutdownDemuxer();
2414 // Test that EndOfStream is ignored if it arrives during a pending seek
2415 // whose seek time is before some existing ranges.
2416 TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2417 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2419 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2420 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2421 AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2422 AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2424 bool seek_cb_was_called = false;
2425 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2426 demuxer_->StartWaitingForSeek(seek_time);
2427 demuxer_->Seek(seek_time,
2428 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2429 message_loop_.RunUntilIdle();
2431 EXPECT_FALSE(seek_cb_was_called);
2433 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2434 MarkEndOfStream(PIPELINE_OK);
2435 message_loop_.RunUntilIdle();
2437 EXPECT_FALSE(seek_cb_was_called);
2439 demuxer_->UnmarkEndOfStream();
2441 AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2442 AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2444 message_loop_.RunUntilIdle();
2446 EXPECT_TRUE(seek_cb_was_called);
2448 ShutdownDemuxer();
2451 // Test ranges in an audio-only stream.
2452 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2453 EXPECT_CALL(*this, DemuxerOpened());
2454 demuxer_->Initialize(
2455 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2457 ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2458 EXPECT_CALL(*this, InitSegmentReceived());
2459 AppendInitSegment(HAS_AUDIO);
2461 // Test a simple cluster.
2462 AppendCluster(
2463 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2465 CheckExpectedRanges("{ [0,92) }");
2467 // Append a disjoint cluster to check for two separate ranges.
2468 AppendCluster(GenerateSingleStreamCluster(
2469 150, 219, kAudioTrackNum, kAudioBlockDuration));
2471 CheckExpectedRanges("{ [0,92) [150,219) }");
2474 // Test ranges in a video-only stream.
2475 TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2476 EXPECT_CALL(*this, DemuxerOpened());
2477 demuxer_->Initialize(
2478 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2480 ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2481 EXPECT_CALL(*this, InitSegmentReceived());
2482 AppendInitSegment(HAS_VIDEO);
2484 // Test a simple cluster.
2485 AppendCluster(
2486 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2488 CheckExpectedRanges("{ [0,132) }");
2490 // Append a disjoint cluster to check for two separate ranges.
2491 AppendCluster(GenerateSingleStreamCluster(
2492 200, 299, kVideoTrackNum, kVideoBlockDuration));
2494 CheckExpectedRanges("{ [0,132) [200,299) }");
2497 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2498 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2500 // Audio: 0 -> 23
2501 // Video: 0 -> 33
2502 // Buffered Range: 0 -> 23
2503 // Audio block duration is smaller than video block duration,
2504 // so the buffered ranges should correspond to the audio blocks.
2505 AppendCluster(GenerateSingleStreamCluster(
2506 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2507 AppendCluster(GenerateSingleStreamCluster(
2508 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2510 CheckExpectedRanges("{ [0,23) }");
2512 // Audio: 300 -> 400
2513 // Video: 320 -> 420
2514 // Buffered Range: 320 -> 400 (end overlap)
2515 AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2516 AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2518 CheckExpectedRanges("{ [0,23) [320,400) }");
2520 // Audio: 520 -> 590
2521 // Video: 500 -> 570
2522 // Buffered Range: 520 -> 570 (front overlap)
2523 AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2524 AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2526 CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2528 // Audio: 720 -> 750
2529 // Video: 700 -> 770
2530 // Buffered Range: 720 -> 750 (complete overlap, audio)
2531 AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2532 AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2534 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2536 // Audio: 900 -> 970
2537 // Video: 920 -> 950
2538 // Buffered Range: 920 -> 950 (complete overlap, video)
2539 AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2540 AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2542 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2544 // Appending within buffered range should not affect buffered ranges.
2545 AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2546 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2548 // Appending to single stream outside buffered ranges should not affect
2549 // buffered ranges.
2550 AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2551 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2554 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2555 EXPECT_CALL(host_, AddTextStream(_, _));
2556 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2558 // Append audio & video data
2559 AppendMuxedCluster(
2560 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2561 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2563 // Verify that a text track with no cues does not result in an empty buffered
2564 // range.
2565 CheckExpectedRanges("{ [0,46) }");
2567 // Add some text cues.
2568 AppendMuxedCluster(
2569 MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
2570 MuxedStreamInfo(kVideoTrackNum, "100K 133"),
2571 MuxedStreamInfo(kTextTrackNum, "100K 200K"));
2573 // Verify that the text cues are not reflected in the buffered ranges.
2574 CheckExpectedRanges("{ [0,46) [100,146) }");
2576 // Remove the buffered ranges.
2577 demuxer_->Remove(kSourceId, base::TimeDelta(),
2578 base::TimeDelta::FromMilliseconds(250));
2579 CheckExpectedRanges("{ }");
2582 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2583 // over-hanging tails at the end of the ranges, as these are likely due to block
2584 // duration differences.
2585 TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2586 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2588 AppendMuxedCluster(
2589 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2590 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2592 CheckExpectedRanges("{ [0,46) }");
2594 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2595 MarkEndOfStream(PIPELINE_OK);
2597 // Verify that the range extends to the end of the video data.
2598 CheckExpectedRanges("{ [0,66) }");
2600 // Verify that the range reverts to the intersection when end of stream
2601 // has been cancelled.
2602 demuxer_->UnmarkEndOfStream();
2603 CheckExpectedRanges("{ [0,46) }");
2605 // Append and remove data so that the 2 streams' end ranges do not overlap.
2607 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2608 AppendMuxedCluster(
2609 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2610 MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
2612 // At this point, the per-stream ranges are as follows:
2613 // Audio: [0,46) [200,246)
2614 // Video: [0,66) [200,398)
2615 CheckExpectedRanges("{ [0,46) [200,246) }");
2617 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2618 base::TimeDelta::FromMilliseconds(300));
2620 // At this point, the per-stream ranges are as follows:
2621 // Audio: [0,46)
2622 // Video: [0,66) [332,398)
2623 CheckExpectedRanges("{ [0,46) }");
2625 AppendMuxedCluster(
2626 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2627 MuxedStreamInfo(kVideoTrackNum, "200K 233"));
2629 // At this point, the per-stream ranges are as follows:
2630 // Audio: [0,46) [200,246)
2631 // Video: [0,66) [200,266) [332,398)
2632 // NOTE: The last ranges of the two streams do not overlap in time.
2633 CheckExpectedRanges("{ [0,46) [200,246) }");
2635 MarkEndOfStream(PIPELINE_OK);
2637 // NOTE: The last range on each stream gets extended to the highest
2638 // end timestamp according to the spec. The last audio range gets extended
2639 // from [200,246) to [200,398) which is why the intersection results in the
2640 // middle range getting larger AND the new range appearing.
2641 CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
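// The expectations above treat the combined buffered range as the
// intersection of the per-stream ranges, with each stream's final range
// extended to the overall highest end timestamp once end of stream is marked.
// The helper below is a minimal illustration of that intersection step for
// sorted, disjoint [start,end) ranges in milliseconds; it is not code used by
// ChunkDemuxer.
static std::vector<std::pair<int, int>> IntersectRangesForIllustration(
    const std::vector<std::pair<int, int>>& a,
    const std::vector<std::pair<int, int>>& b) {
  std::vector<std::pair<int, int>> result;
  size_t i = 0;
  size_t j = 0;
  while (i < a.size() && j < b.size()) {
    int start = std::max(a[i].first, b[j].first);
    int end = std::min(a[i].second, b[j].second);
    if (start < end)
      result.push_back(std::make_pair(start, end));
    // Advance whichever range ends first.
    if (a[i].second < b[j].second)
      ++i;
    else
      ++j;
  }
  return result;
}
// For the case above: audio { [0,46) [200,398) } intersected with video
// { [0,66) [200,266) [332,398) } yields { [0,46) [200,266) [332,398) }.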
2644 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2645 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2647 // Create a cluster where the video timecode begins 25ms after the audio.
2648 AppendCluster(GenerateCluster(0, 25, 8));
2650 Seek(base::TimeDelta::FromSeconds(0));
2651 GenerateExpectedReads(0, 25, 8);
2653 // Seek to 5 seconds.
2654 Seek(base::TimeDelta::FromSeconds(5));
2656 // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2657 // after the video.
2658 AppendCluster(GenerateCluster(5025, 5000, 8));
2659 GenerateExpectedReads(5025, 5000, 8);
2662 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2663 std::string audio_id = "audio1";
2664 std::string video_id = "video1";
2665 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2667 // Generate two streams where the video stream starts 5ms after the audio
2668 // stream and append them.
2669 AppendCluster(audio_id, GenerateSingleStreamCluster(
2670 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2671 AppendCluster(video_id, GenerateSingleStreamCluster(
2672 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2674 // Both streams should be able to fulfill a seek to 25.
2675 Seek(base::TimeDelta::FromMilliseconds(25));
2676 GenerateAudioStreamExpectedReads(25, 4);
2677 GenerateVideoStreamExpectedReads(30, 4);
2680 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2681 std::string audio_id = "audio1";
2682 std::string video_id = "video1";
2683 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2685 // Generate two streams where the video stream starts 10s after the audio
2686 // stream and append them.
2687 AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2688 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2689 AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2690 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2692 // Should not be able to fulfill a seek to 0.
2693 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2694 demuxer_->StartWaitingForSeek(seek_time);
2695 demuxer_->Seek(seek_time,
2696 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2697 ExpectRead(DemuxerStream::AUDIO, 0);
2698 ExpectEndOfStream(DemuxerStream::VIDEO);
2701 TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2702 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2704 // Generate and append an empty cluster beginning at 0.
2705 AppendEmptyCluster(0);
2707 // Sanity check that data can be appended after this cluster correctly.
2708 AppendCluster(GenerateCluster(0, 2));
2709 ExpectRead(DemuxerStream::AUDIO, 0);
2710 ExpectRead(DemuxerStream::VIDEO, 0);
2713 TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
2714 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2716 #if defined(USE_PROPRIETARY_CODECS)
2717 expected = ChunkDemuxer::kOk;
2718 #endif
2720 std::vector<std::string> codecs;
2721 codecs.push_back("avc1.4D4041");
2723 EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2726 // Test codec IDs that are not compliant with RFC6381, but have been
2727 // seen in the wild.
2728 TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2729 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2731 #if defined(USE_PROPRIETARY_CODECS)
2732 expected = ChunkDemuxer::kOk;
2733 #endif
2734 const char* codec_ids[] = {
2735 // GPAC places leading zeros on the audio object type.
2736 "mp4a.40.02",
2737 "mp4a.40.05"
2740 for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2741 std::vector<std::string> codecs;
2742 codecs.push_back(codec_ids[i]);
2744 ChunkDemuxer::Status result =
2745 demuxer_->AddId("source_id", "audio/mp4", codecs);
2747 EXPECT_EQ(result, expected)
2748 << "Fail to add codec_id '" << codec_ids[i] << "'";
2750 if (result == ChunkDemuxer::kOk)
2751 demuxer_->RemoveId("source_id");
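// Hypothetical illustration only (this is not the code ChunkDemuxer uses to
// accept these IDs): one way to tolerate the GPAC-style IDs above is to drop
// leading zeros from the trailing object-type field, so "mp4a.40.02" is
// interpreted like "mp4a.40.2".
static std::string StripLeadingZerosFromObjectType(const std::string& codec) {
  size_t dot = codec.rfind('.');
  if (dot == std::string::npos || dot + 1 >= codec.size())
    return codec;
  size_t first_digit = dot + 1;
  // Keep at least one digit so "mp4a.40.0" stays "mp4a.40.0".
  while (first_digit + 1 < codec.size() && codec[first_digit] == '0')
    ++first_digit;
  return codec.substr(0, dot + 1) + codec.substr(first_digit);
}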
2755 TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2756 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2758 EXPECT_CALL(host_, SetDuration(_))
2759 .Times(AnyNumber());
2761 base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2762 base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2764 AppendCluster(kDefaultFirstCluster());
2765 AppendCluster(kDefaultSecondCluster());
2766 MarkEndOfStream(PIPELINE_OK);
2768 DemuxerStream::Status status;
2769 base::TimeDelta last_timestamp;
2771 // Verify that we can read audio & video to the end w/o problems.
2772 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2773 EXPECT_EQ(DemuxerStream::kOk, status);
2774 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2776 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2777 EXPECT_EQ(DemuxerStream::kOk, status);
2778 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2780 // Seek back to 0 and verify that we can read to the end again.
2781 Seek(base::TimeDelta::FromMilliseconds(0));
2783 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2784 EXPECT_EQ(DemuxerStream::kOk, status);
2785 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2787 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2788 EXPECT_EQ(DemuxerStream::kOk, status);
2789 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2792 TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2793 EXPECT_CALL(*this, DemuxerOpened());
2794 demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2795 ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2796 ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2798 CheckExpectedRanges("audio", "{ }");
2799 CheckExpectedRanges("video", "{ }");
2802 // Test that Seek() completes successfully when the first cluster
2803 // arrives.
2804 TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2805 InSequence s;
2807 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2809 AppendCluster(kDefaultFirstCluster());
2811 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2812 demuxer_->StartWaitingForSeek(seek_time);
2814 AppendCluster(kDefaultSecondCluster());
2815 EXPECT_CALL(host_, SetDuration(
2816 base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2817 MarkEndOfStream(PIPELINE_OK);
2819 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2821 GenerateExpectedReads(0, 4);
2822 GenerateExpectedReads(46, 66, 5);
2824 EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2825 end_of_stream_helper.RequestReads();
2826 end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2829 TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2830 InSequence s;
2832 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2834 DemuxerStream::Status status;
2835 base::TimeDelta last_timestamp;
2837 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2839 // Fetch initial video config and verify it matches what we expect.
2840 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2841 ASSERT_TRUE(video_config_1.IsValidConfig());
2842 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2843 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2845 ExpectRead(DemuxerStream::VIDEO, 0);
2847 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2849 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2850 EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2852 // Fetch the new decoder config.
2853 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2854 ASSERT_TRUE(video_config_2.IsValidConfig());
2855 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2856 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2858 ExpectRead(DemuxerStream::VIDEO, 527);
2860 // Read until the next config change.
2861 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2862 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2863 EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2865 // Get the new config and verify that it matches the first one.
2866 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2868 ExpectRead(DemuxerStream::VIDEO, 801);
2870 // Read until the end of the stream just to make sure there aren't any other
2871 // config changes.
2872 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2873 ASSERT_EQ(status, DemuxerStream::kOk);
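// Note on the pattern above: when a Read() crosses a config boundary it
// completes with DemuxerStream::kConfigChanged instead of a media buffer, and
// the test then refetches video_decoder_config() before reading further. The
// audio test below follows the same protocol with audio_decoder_config().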
2876 TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2877 InSequence s;
2879 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2881 DemuxerStream::Status status;
2882 base::TimeDelta last_timestamp;
2884 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2886 // Fetch initial audio config and verify it matches what we expect.
2887 const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2888 ASSERT_TRUE(audio_config_1.IsValidConfig());
2889 EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2890 EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2892 ExpectRead(DemuxerStream::AUDIO, 0);
2894 // The first config change seen is from a splice frame representing an overlap
2895 // of buffers from config 1 by buffers from config 2.
2896 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2897 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2898 EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2900 // Fetch the new decoder config.
2901 const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2902 ASSERT_TRUE(audio_config_2.IsValidConfig());
2903 EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2904 EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2906 // The next config change is from a splice frame representing an overlap of
2907 // buffers from config 2 by buffers from config 1.
2908 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2909 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2910 EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2911 ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2913 // Read until the end of the stream just to make sure there aren't any other
2914 // config changes.
2915 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2916 ASSERT_EQ(status, DemuxerStream::kOk);
2917 EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2920 TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2921 InSequence s;
2923 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2925 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2927 // Fetch initial video config and verify it matches what we expect.
2928 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2929 ASSERT_TRUE(video_config_1.IsValidConfig());
2930 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2931 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2933 ExpectRead(DemuxerStream::VIDEO, 0);
2935 // Seek to a location with a different config.
2936 Seek(base::TimeDelta::FromMilliseconds(527));
2938 // Verify that the config change is signalled.
2939 ExpectConfigChanged(DemuxerStream::VIDEO);
2941 // Fetch the new decoder config and verify it is what we expect.
2942 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2943 ASSERT_TRUE(video_config_2.IsValidConfig());
2944 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2945 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2947 // Verify that Read() will return a buffer now.
2948 ExpectRead(DemuxerStream::VIDEO, 527);
2950 // Seek back to the beginning and verify we get another config change.
2951 Seek(base::TimeDelta::FromMilliseconds(0));
2952 ExpectConfigChanged(DemuxerStream::VIDEO);
2953 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2954 ExpectRead(DemuxerStream::VIDEO, 0);
2956 // Seek to a location that requires a config change and then
2957 // seek to a new location that has the same configuration as
2958 // the start of the file without a Read() in the middle.
2959 Seek(base::TimeDelta::FromMilliseconds(527));
2960 Seek(base::TimeDelta::FromMilliseconds(801));
2962 // Verify that no config change is signalled.
2963 ExpectRead(DemuxerStream::VIDEO, 801);
2964 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2967 TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2968 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2970 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2971 AppendCluster(GenerateCluster(0, 2));
2973 Seek(base::TimeDelta::FromMilliseconds(30000));
2975 GenerateExpectedReads(30000, 2);
2978 TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2979 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2981 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2982 AppendCluster(GenerateCluster(1000, 2));
2984 GenerateExpectedReads(0, 2);
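// In the two offset tests above, a buffered frame's presentation time is its
// appended timestamp plus the source's timestamp offset: 0 ms + 30 s yields
// the 30000 ms reads in TimestampPositiveOffset, and 1000 ms + (-1 s) yields
// the 0 ms reads here.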
2987 TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2988 std::string audio_id = "audio1";
2989 std::string video_id = "video1";
2990 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2992 ASSERT_TRUE(SetTimestampOffset(
2993 audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2994 ASSERT_TRUE(SetTimestampOffset(
2995 video_id, base::TimeDelta::FromMilliseconds(-2500)));
2996 AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2997 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2998 AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2999 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
3000 GenerateAudioStreamExpectedReads(0, 4);
3001 GenerateVideoStreamExpectedReads(0, 4);
3003 Seek(base::TimeDelta::FromMilliseconds(27300));
3005 ASSERT_TRUE(SetTimestampOffset(
3006 audio_id, base::TimeDelta::FromMilliseconds(27300)));
3007 ASSERT_TRUE(SetTimestampOffset(
3008 video_id, base::TimeDelta::FromMilliseconds(27300)));
3009 AppendCluster(audio_id, GenerateSingleStreamCluster(
3010 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
3011 AppendCluster(video_id, GenerateSingleStreamCluster(
3012 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
3013 GenerateVideoStreamExpectedReads(27300, 4);
3014 GenerateAudioStreamExpectedReads(27300, 4);
3017 TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
3018 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3020 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
3021 // Append only part of the cluster data.
3022 AppendData(cluster->data(), cluster->size() - 13);
3024 // Confirm we're in the middle of parsing a media segment.
3025 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3027 demuxer_->ResetParserState(kSourceId,
3028 append_window_start_for_next_append_,
3029 append_window_end_for_next_append_,
3030 &timestamp_offset_map_[kSourceId]);
3032 // After ResetParserState(), parsing should no longer be in the middle of a
3033 // media segment.
3034 ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
3037 #if defined(USE_PROPRIETARY_CODECS)
3038 #if defined(ENABLE_MPEG2TS_STREAM_PARSER)
3039 TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
3040 EXPECT_CALL(*this, DemuxerOpened());
3041 demuxer_->Initialize(
3042 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3043 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3045 // For info:
3046 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3047 // Video: first PES:
3048 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
3049 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
3050 // Audio: first PES:
3051 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
3052 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
3053 // Video: last PES:
3054 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
3055 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
3056 // Audio: last PES:
3057 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
3059 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3060 EXPECT_CALL(*this, InitSegmentReceived());
3061 AppendData(kSourceId, buffer->data(), buffer->data_size());
3063 // Confirm we're in the middle of parsing a media segment.
3064 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3066 // ResetParserState() on the MPEG-2 TS parser triggers the emission of the last
3067 // video buffer which is pending in the stream parser.
3068 Ranges<base::TimeDelta> range_before_abort =
3069 demuxer_->GetBufferedRanges(kSourceId);
3070 demuxer_->ResetParserState(kSourceId,
3071 append_window_start_for_next_append_,
3072 append_window_end_for_next_append_,
3073 &timestamp_offset_map_[kSourceId]);
3074 Ranges<base::TimeDelta> range_after_abort =
3075 demuxer_->GetBufferedRanges(kSourceId);
3077 ASSERT_EQ(range_before_abort.size(), 1u);
3078 ASSERT_EQ(range_after_abort.size(), 1u);
3079 EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
3080 EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
3083 TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
3084 EXPECT_CALL(*this, DemuxerOpened());
3085 demuxer_->Initialize(
3086 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3087 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3089 // For info:
3090 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3091 // Video: first PES:
3092 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
3093 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
3094 // Audio: first PES:
3095 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
3096 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
3097 // Video: last PES:
3098 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
3099 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
3100 // Audio: last PES:
3101 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
3103 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3104 EXPECT_CALL(*this, InitSegmentReceived());
3105 AppendData(kSourceId, buffer->data(), buffer->data_size());
3107 // Confirm we're in the middle of parsing a media segment.
3108 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3110 // Seek to a time corresponding to buffers that will be emitted during the
3111 // abort.
3112 Seek(base::TimeDelta::FromMilliseconds(4110));
3114 // ResetParserState() on the MPEG-2 TS parser triggers the emission of the last
3115 // video buffer which is pending in the stream parser.
3116 demuxer_->ResetParserState(kSourceId,
3117 append_window_start_for_next_append_,
3118 append_window_end_for_next_append_,
3119 &timestamp_offset_map_[kSourceId]);
3122 #endif
3123 #endif
3125 TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
3126 const uint8 kBuffer[] = {
3127 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
3128 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
3130 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 3 due to:)
3131 0xE7, 0x81, 0x02, // Cluster TIMECODE (value = 2)
3132 /* e.g. put some blocks here... */
3133 0x1A, 0x45, 0xDF, 0xA3, 0x8A, // EBMLHEADER (size = 10, not fully appended)
3136 // This array indicates the expected return value of IsParsingMediaSegment()
3137 // following each incrementally appended byte in |kBuffer|.
3138 const bool kExpectedReturnValues[] = {
3139 false, false, false, false, true,
3140 true, true, false,
3142 false, false, false, false, true,
3143 true, true, true,
3145 true, true, true, true, false,
3148 static_assert(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
3149 "test arrays out of sync");
3150 static_assert(arraysize(kBuffer) == sizeof(kBuffer),
3151 "there should be one byte per index");
3153 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3155 for (size_t i = 0; i < sizeof(kBuffer); i++) {
3156 DVLOG(3) << "Appending and testing index " << i;
3157 AppendData(kBuffer + i, 1);
3158 bool expected_return_value = kExpectedReturnValues[i];
3159 EXPECT_EQ(expected_return_value,
3160 demuxer_->IsParsingMediaSegment(kSourceId));
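// Minimal sketch covering only the one-byte size fields used in |kBuffer|
// above (this is not the production EBML parser): the high bit of the byte
// marks a one-byte size field, the low seven bits carry the size, and the
// all-ones encoding (a raw byte of 0xFF) is the reserved "unknown size"
// marker that leaves the second cluster above open-ended.
static bool ParseOneByteEbmlSizeForIllustration(uint8 size_byte, int* size) {
  if ((size_byte & 0x80) == 0)
    return false;  // Multi-byte size fields are not handled by this sketch.
  int value = size_byte & 0x7F;
  if (value == 0x7F)
    return false;  // Reserved encoding: element size is unknown.
  *size = value;
  return true;
}
// e.g. 0x83 yields 3, 0x8A yields 10, and 0xFF reports an unknown size.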
3164 TEST_F(ChunkDemuxerTest, DurationChange) {
3165 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3166 const int kStreamDuration = kDefaultDuration().InMilliseconds();
3168 // Add data leading up to the currently set duration.
3169 AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
3170 kStreamDuration - kVideoBlockDuration,
3171 2));
3173 CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
3175 // Add data beginning at the currently set duration and expect a new duration
3176 // to be signaled. Note that the last video block will have a higher end
3177 // timestamp than the last audio block.
3178 const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
3179 EXPECT_CALL(host_, SetDuration(
3180 base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
3181 AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
3183 CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
3185 // Add more data to the end of each media type. Note that the last audio block
3186 // will have a higher end timestamp than the last video block.
3187 const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
3188 EXPECT_CALL(host_, SetDuration(
3189 base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
3190 AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
3191 kStreamDuration + kVideoBlockDuration,
3192 3));
3194 // See that the range has increased appropriately (but not to the full
3195 // duration of 201293, since there is not enough video appended for that).
3196 CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
3199 TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
3200 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3201 ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
3202 EXPECT_CALL(host_, SetDuration(
3203 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
3204 kVideoBlockDuration * 2)));
3205 AppendCluster(GenerateCluster(0, 4));
3208 TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
3209 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3211 AppendCluster(kDefaultFirstCluster());
3213 EXPECT_CALL(host_, SetDuration(
3214 base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
3215 MarkEndOfStream(PIPELINE_OK);
3219 TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
3220 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3221 AppendData(NULL, 0);
3224 TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
3225 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3227 EXPECT_CALL(host_, SetDuration(_))
3228 .Times(AnyNumber());
3230 AppendCluster(kDefaultFirstCluster());
3231 MarkEndOfStream(PIPELINE_OK);
3233 demuxer_->UnmarkEndOfStream();
3235 AppendCluster(kDefaultSecondCluster());
3236 MarkEndOfStream(PIPELINE_OK);
3239 // Test receiving a Shutdown() call before we get an Initialize()
3240 // call. This can happen if the video element gets destroyed before
3241 // the pipeline has a chance to initialize the demuxer.
3242 TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3243 demuxer_->Shutdown();
3244 demuxer_->Initialize(
3245 &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3246 message_loop_.RunUntilIdle();
3249 // Verifies that signaling end of stream while stalled at a gap
3250 // boundary does not trigger end of stream buffers to be returned.
3251 TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
3252 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3254 AppendCluster(0, 10);
3255 AppendCluster(300, 10);
3256 CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
3258 GenerateExpectedReads(0, 10);
3260 bool audio_read_done = false;
3261 bool video_read_done = false;
3262 ReadAudio(base::Bind(&OnReadDone,
3263 base::TimeDelta::FromMilliseconds(138),
3264 &audio_read_done));
3265 ReadVideo(base::Bind(&OnReadDone,
3266 base::TimeDelta::FromMilliseconds(138),
3267 &video_read_done));
3269 // Verify that the reads didn't complete
3270 EXPECT_FALSE(audio_read_done);
3271 EXPECT_FALSE(video_read_done);
3273 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
3274 MarkEndOfStream(PIPELINE_OK);
3276 // Verify that the reads still haven't completed.
3277 EXPECT_FALSE(audio_read_done);
3278 EXPECT_FALSE(video_read_done);
3280 demuxer_->UnmarkEndOfStream();
3282 AppendCluster(138, 22);
3284 message_loop_.RunUntilIdle();
3286 CheckExpectedRanges(kSourceId, "{ [0,435) }");
3288 // Verify that the reads have completed.
3289 EXPECT_TRUE(audio_read_done);
3290 EXPECT_TRUE(video_read_done);
3292 // Read the rest of the buffers.
3293 GenerateExpectedReads(161, 171, 20);
3295 // Verify that reads block because the append cleared the end of stream state.
3296 audio_read_done = false;
3297 video_read_done = false;
3298 ReadAudio(base::Bind(&OnReadDone_EOSExpected,
3299 &audio_read_done));
3300 ReadVideo(base::Bind(&OnReadDone_EOSExpected,
3301 &video_read_done));
3303 // Verify that the reads don't complete.
3304 EXPECT_FALSE(audio_read_done);
3305 EXPECT_FALSE(video_read_done);
3307 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
3308 MarkEndOfStream(PIPELINE_OK);
3310 EXPECT_TRUE(audio_read_done);
3311 EXPECT_TRUE(video_read_done);
3314 TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
3315 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3317 // Cancel preroll.
3318 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
3319 demuxer_->CancelPendingSeek(seek_time);
3321 // Initiate the seek to the new location.
3322 Seek(seek_time);
3324 // Append data to satisfy the seek.
3325 AppendCluster(seek_time.InMilliseconds(), 10);
3328 TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
3329 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3331 // Set different memory limits for audio and video.
3332 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3333 demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize + 1);
3335 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1000);
3337 // Append data at the start that can be garbage collected:
3338 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
3339 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 5);
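// Ten audio blocks of kAudioBlockDuration (23ms) and five video blocks of
// kVideoBlockDuration (33ms) yield the [0,230) and [0,165) ranges checked
// below.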
3341 // We should be right at the buffer limit, so this eviction should succeed.
3342 EXPECT_TRUE(demuxer_->EvictCodedFrames(
3343 kSourceId, base::TimeDelta::FromMilliseconds(0), 0));
3345 CheckExpectedRanges(DemuxerStream::AUDIO, "{ [0,230) }");
3346 CheckExpectedRanges(DemuxerStream::VIDEO, "{ [0,165) }");
3348 // Seek so we can garbage collect the data appended above.
3349 Seek(seek_time);
3351 // Append data at seek_time.
3352 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3353 seek_time.InMilliseconds(), 10);
3354 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3355 seek_time.InMilliseconds(), 5);
3357 // This should delete the first append, leaving us exactly at the buffer limit.
3358 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 0));
3360 // Verify that the old data, and nothing more, has been garbage collected.
3361 CheckExpectedRanges(DemuxerStream::AUDIO, "{ [1000,1230) }");
3362 CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
3365 TEST_F(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekForward) {
3366 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3367 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3368 // Append some data at position 1000ms
3369 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 10);
3370 CheckExpectedRanges(kSourceId, "{ [1000,1230) }");
3372 // GC should be able to evict frames in the currently buffered range, since
3373 // those frames are earlier than the seek target position.
3374 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(2000);
3375 Seek(seek_time);
3376 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 5 * kBlockSize));
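// Evicting five blocks (5 * 23ms = 115ms) from the front of [1000,1230)
// should leave [1115,1230), as the range check after the append confirms.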
3378 // Append data to complete seek operation
3379 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
3380 CheckExpectedRanges(kSourceId, "{ [1115,1230) [2000,2115) }");
3383 TEST_F(ChunkDemuxerTest, GCDuringSeek_SingleRange_SeekBack) {
3384 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3385 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3386 // Append some data at position 1000ms
3387 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 10);
3388 CheckExpectedRanges(kSourceId, "{ [1000,1230) }");
3390 // GC should be able to evict frames in the currently buffered range: the
3391 // seek target position has no data, so some frames should be evicted to make
3392 // space for the upcoming append at the seek target position.
3393 base::TimeDelta seek_time = base::TimeDelta();
3394 Seek(seek_time);
3395 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 5 * kBlockSize));
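// Here the five oldest blocks (115ms) should be evicted from the front of
// [1000,1230), leaving [1115,1230) and making room for the five blocks
// appended at the seek target below.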
3397 // Append data to complete seek operation
3398 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 5);
3399 CheckExpectedRanges(kSourceId, "{ [0,115) [1115,1230) }");
3402 TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekForward) {
3403 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3404 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3405 // Append some data at position 1000ms then at 2000ms
3406 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 5);
3407 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
3408 CheckExpectedRanges(kSourceId, "{ [1000,1115) [2000,2115) }");
3410 // GC should be able to evict frames in the currently buffered ranges, since
3411 // those frames are earlier than the seek target position.
3412 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(3000);
3413 Seek(seek_time);
3414 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 8 * kBlockSize));
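// Eight blocks should go: the entire five-block [1000,1115) range plus three
// blocks (69ms) from the front of the 2000ms range, leaving [2069,2115).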
3416 // Append data to complete seek operation
3417 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 3000, 5);
3418 CheckExpectedRanges(kSourceId, "{ [2069,2115) [3000,3115) }");
3421 TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween1) {
3422 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3423 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3424 // Append some data at position 1000ms then at 2000ms
3425 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 5);
3426 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
3427 CheckExpectedRanges(kSourceId, "{ [1000,1115) [2000,2115) }");
3429 // GC should be able to evict all frames from the first buffered range, since
3430 // those frames are earlier than the seek target position. But there are only
3431 // 5 blocks worth of data in the first range and the seek target position has
3432 // no data, so GC next tries to delete frames from the back of the buffered
3433 // ranges. That yields nothing, since it is the most recently appended data,
3434 // so GC finally removes data from the front of the remaining buffered range
3435 // (2000ms) to free up enough space for the upcoming append and allow the
3436 // seek to proceed.
3437 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1500);
3438 Seek(seek_time);
3439 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 8 * kBlockSize));
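// As described above, the whole [1000,1115) range (five blocks) plus three
// blocks from the front of [2000,2115) should be evicted, leaving [2069,2115)
// alongside the newly appended range.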
3441 // Append data to complete seek operation
3442 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1500, 5);
3443 CheckExpectedRanges(kSourceId, "{ [1500,1615) [2069,2115) }");
3446 TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekInbetween2) {
3447 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3448 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3450 // Append some data at position 2000ms first, then at 1000ms, so that the last
3451 // appended data position is in the first buffered range (this matters to the
3452 // GC algorithm since it tries to preserve the most recently appended data).
3453 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
3454 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 5);
3455 CheckExpectedRanges(kSourceId, "{ [1000,1115) [2000,2115) }");
3457 // Now try performing garbage collection without announcing a seek first, i.e.
3458 // without calling Seek(). The GC algorithm should try to preserve data in the
3459 // first range, since that is the most recently appended data.
3460 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(2030);
3461 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 5 * kBlockSize));
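// The older five-block [2000,2115) range should be dropped entirely, while
// the more recently appended [1000,1115) range is preserved, as the range
// check below confirms.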
3463 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1500, 5);
3464 CheckExpectedRanges(kSourceId, "{ [1000,1115) [1500,1615) }");
3467 TEST_F(ChunkDemuxerTest, GCDuringSeek_MultipleRanges_SeekBack) {
3468 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3469 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3470 // Append some data at position 1000ms then at 2000ms
3471 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 1000, 5);
3472 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 2000, 5);
3473 CheckExpectedRanges(kSourceId, "{ [1000,1115) [2000,2115) }");
3475 // GC should be able to evict frames in the currently buffered ranges, since
3476 // those frames are earlier than the seek target position.
3477 base::TimeDelta seek_time = base::TimeDelta();
3478 Seek(seek_time);
3479 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time, 8 * kBlockSize));
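// Again eight blocks are evicted: all of [1000,1115) and three blocks from
// the front of [2000,2115), leaving [2069,2115) plus room for the five blocks
// appended at 0ms below.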
3481 // Append data to complete seek operation
3482 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 5);
3483 CheckExpectedRanges(kSourceId, "{ [0,115) [2069,2115) }");
3486 TEST_F(ChunkDemuxerTest, GCDuringSeek) {
3487 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3489 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);
3491 base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
3492 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
3494 // Initiate a seek to |seek_time1|.
3495 Seek(seek_time1);
3497 // Append data to satisfy the first seek request.
3498 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3499 seek_time1.InMilliseconds(), 5);
3500 CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
3502 // We are under the memory limit, so Evict should be a no-op.
3503 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time1, 0));
3504 CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
3506 // Signal that the second seek is starting.
3507 demuxer_->StartWaitingForSeek(seek_time2);
3509 // Append data to satisfy the second seek.
3510 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3511 seek_time2.InMilliseconds(), 5);
3512 CheckExpectedRanges(kSourceId, "{ [500,615) [1000,1115) }");
3514 // We are now over our memory usage limit. We have just seeked to |seek_time2|,
3515 // so data around the 500ms position should be preserved, while the previous
3516 // append at 1000ms should be removed.
3517 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
3518 CheckExpectedRanges(kSourceId, "{ [500,615) }");
3520 // Complete the seek.
3521 demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
3523 // Append more data and make sure that we preserve both the buffered range
3524 // around |seek_time2|, because that's the current playback position,
3525 // and the newly appended range, since this is the most recent append.
3526 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
3527 EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
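// Eviction reports failure here: both ranges must be kept (the current
// playback position is at |seek_time2| and the 700ms range is the most recent
// append), so memory usage cannot be brought back under the five-block limit.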
3528 CheckExpectedRanges(kSourceId, "{ [500,615) [700,815) }");
3531 TEST_F(ChunkDemuxerTest, GCKeepPlayhead) {
3532 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3534 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);
3536 // Append data at the start that can be garbage collected:
3537 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
3538 CheckExpectedRanges(kSourceId, "{ [0,230) }");
3540 // We expect garbage collection to fail, as we don't want to spontaneously
3541 // create gaps in the source buffer stream. Gaps could break playback for many
3542 // clients that don't bother to check ranges after an append.
3543 EXPECT_FALSE(demuxer_->EvictCodedFrames(
3544 kSourceId, base::TimeDelta::FromMilliseconds(0), 0));
3545 CheckExpectedRanges(kSourceId, "{ [0,230) }");
3547 // Increase media_time a bit. This will allow some data to be collected, but
3548 // we are still over the memory usage limit.
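// Seeking to 46ms frees only the two blocks before the playhead; the
// remaining eight blocks still exceed the five-block limit, so eviction
// should report failure.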
3549 base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(23*2);
3550 Seek(seek_time1);
3551 EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time1, 0));
3552 CheckExpectedRanges(kSourceId, "{ [46,230) }");
3554 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(23*4);
3555 Seek(seek_time2);
3556 EXPECT_FALSE(demuxer_->EvictCodedFrames(kSourceId, seek_time2, 0));
3557 CheckExpectedRanges(kSourceId, "{ [92,230) }");
3559 // media_time has progressed to a point where we can collect enough data to
3560 // be under the memory limit, so Evict should return true.
3561 base::TimeDelta seek_time3 = base::TimeDelta::FromMilliseconds(23*6);
3562 Seek(seek_time3);
3563 EXPECT_TRUE(demuxer_->EvictCodedFrames(kSourceId, seek_time3, 0));
3564 // Strictly speaking the current playback time is 23*6==138ms, so we could
3565 // release data up to 138ms, but we only release as much data as necessary
3566 // to bring memory usage under the limit, so we release only up to 115ms.
3567 CheckExpectedRanges(kSourceId, "{ [115,230) }");
3570 TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
3571 ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
3572 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3574 // Set the append window to [50,280).
3575 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3576 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3578 // Append a cluster that starts before and ends after the append window.
3579 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3580 "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3582 // Verify that GOPs that start outside the window are not included
3583 // in the buffer. Also verify that buffers that start inside the
3584 // window and extend beyond the end of the window are not included.
3585 CheckExpectedRanges(kSourceId, "{ [120,270) }");
3586 CheckExpectedBuffers(stream, "120K 150 180 210 240K");
3588 // Extend the append window to [50,650).
3589 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3591 // Append more data and verify that the added buffers start at the next
3592 // key frame.
3593 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3594 "360 390 420K 450 480 510 540K 570 600 630K");
3595 CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3598 TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
3599 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3600 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3602 // Set the append window to [50,280).
3603 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3604 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3606 // Append a cluster that starts before and ends after the append window.
3607 AppendSingleStreamCluster(
3608 kSourceId, kAudioTrackNum,
3609 "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
3611 // Verify that frames that end outside the window are not included
3612 // in the buffer. Also verify that buffers that start inside the
3613 // window and extend beyond the end of the window are not included.
3615 // The first 50ms of the range should be truncated since it overlaps
3616 // the start of the append window.
3617 CheckExpectedRanges(kSourceId, "{ [50,280) }");
3619 // The "50P" buffer is the "0" buffer marked for complete discard. The next
3620 // "50" buffer is the "30" buffer marked with 20ms of start discard.
3621 CheckExpectedBuffers(stream, "50KP 50K 60K 90K 120K 150K 180K 210K 240K");
3623 // Extend the append window to [50,650).
3624 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3626 // Append more data and verify that a new range is created.
3627 AppendSingleStreamCluster(
3628 kSourceId, kAudioTrackNum,
3629 "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
3630 CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
3633 TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
3634 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3636 // Set the append window to [10,20).
3637 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
3638 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3640 // Append a cluster that starts before and ends after the append window.
3641 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
3643 // Verify the append is clipped to the append window.
3644 CheckExpectedRanges(kSourceId, "{ [10,20) }");
3647 TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
3648 EXPECT_CALL(*this, DemuxerOpened());
3649 demuxer_->Initialize(
3650 &host_,
3651 CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3652 true);
3653 ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3655 // Set the append window to [50,150).
3656 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3657 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);
3659 // Read a WebM file into memory and send the data to the demuxer. The chunk
3660 // size has been chosen carefully to ensure the preroll buffer used by the
3661 // partial append window trim must come from a previous Append() call.
3662 scoped_refptr<DecoderBuffer> buffer =
3663 ReadTestDataFile("bear-320x240-audio-only.webm");
3664 EXPECT_CALL(*this, InitSegmentReceived());
3665 AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
3667 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3668 CheckExpectedBuffers(stream, "50KP 50K 62K 86K 109K 122K 125K 128K");
3671 TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
3672 EXPECT_CALL(*this, DemuxerOpened());
3673 demuxer_->Initialize(
3674 &host_,
3675 CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3676 true);
3677 ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3679 // Set the append window such that the first file is completely before the
3680 // append window.
3681 // Expect duration adjustment since the actual duration differs slightly from
3682 // the duration in the init segment.
3683 const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
3684 append_window_start_for_next_append_ = duration_1;
3686 // Read a WebM file into memory and append the data.
3687 scoped_refptr<DecoderBuffer> buffer =
3688 ReadTestDataFile("bear-320x240-audio-only.webm");
3689 EXPECT_CALL(*this, InitSegmentReceived());
3690 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
3691 CheckExpectedRanges(kSourceId, "{ }");
3693 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3694 AudioDecoderConfig config_1 = stream->audio_decoder_config();
3696 // Read in a second WebM with a different config and append the data.
3697 scoped_refptr<DecoderBuffer> buffer2 =
3698 ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
3699 EXPECT_CALL(*this, InitSegmentReceived());
3700 EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
3701 ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
3702 AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
3703 CheckExpectedRanges(kSourceId, "{ [2746,5519) }");
3705 Seek(duration_1);
3706 ExpectConfigChanged(DemuxerStream::AUDIO);
3707 ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
3708 CheckExpectedBuffers(stream, "2746K 2767K 2789K 2810K");
3711 TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
3712 DemuxerStream* text_stream = NULL;
3713 EXPECT_CALL(host_, AddTextStream(_, _))
3714 .WillOnce(SaveArg<0>(&text_stream));
3715 ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
3716 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3718 // Set the append window to [20,280).
3719 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3720 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3722 // Append a cluster that starts before and ends after the append
3723 // window.
3724 AppendMuxedCluster(
3725 MuxedStreamInfo(kVideoTrackNum,
3726 "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
3727 MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K" ));
3729 // Verify that text cues that start outside the window are not included
3730 // in the buffer. Also verify that cues that extend beyond the
3731 // window are not included.
3732 CheckExpectedRanges(kSourceId, "{ [100,270) }");
3733 CheckExpectedBuffers(video_stream, "120K 150 180 210 240K");
3734 CheckExpectedBuffers(text_stream, "100K");
3736 // Extend the append window to [20,650).
3737 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3739 // Append more data and verify that a new range is created.
3740 AppendMuxedCluster(
3741 MuxedStreamInfo(kVideoTrackNum,
3742 "360 390 420K 450 480 510 540K 570 600 630K"),
3743 MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K" ));
3744 CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");
3746 // Seek to the new range and verify that the expected buffers are returned.
3747 Seek(base::TimeDelta::FromMilliseconds(420));
3748 CheckExpectedBuffers(video_stream, "420K 450 480 510 540K 570 600");
3749 CheckExpectedBuffers(text_stream, "400K 500K");
3752 TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
3753 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3754 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
3755 AppendGarbage();
3756 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
3757 demuxer_->StartWaitingForSeek(seek_time);
3760 TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
3761 DemuxerStream* text_stream = NULL;
3762 EXPECT_CALL(host_, AddTextStream(_, _))
3763 .WillOnce(SaveArg<0>(&text_stream));
3764 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3766 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3767 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3769 AppendMuxedCluster(
3770 MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
3771 MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
3772 MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));
3774 CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
3775 CheckExpectedBuffers(video_stream, "0K 30 60 90 120K 150 180");
3776 CheckExpectedBuffers(text_stream, "0K 100K 200K");
3778 // Remove the buffers that were added.
3779 demuxer_->Remove(kSourceId, base::TimeDelta(),
3780 base::TimeDelta::FromMilliseconds(300));
3782 // Verify that all the appended data has been removed.
3783 CheckExpectedRanges(kSourceId, "{ }");
3785 // Append new buffers that are clearly different than the original
3786 // ones and verify that only the new buffers are returned.
3787 AppendMuxedCluster(
3788 MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
3789 MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
3790 MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));
3792 Seek(base::TimeDelta());
3793 CheckExpectedBuffers(audio_stream, "1K 21K 41K 61K 81K 101K 121K 141K");
3794 CheckExpectedBuffers(video_stream, "1K 31 61 91 121K 151 181");
3795 CheckExpectedBuffers(text_stream, "1K 101K 201K");
3798 TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
3799 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3800 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3802 // Set the duration to something small so that the append that
3803 // follows updates the duration to reflect the end of the appended data.
3804 EXPECT_CALL(host_, SetDuration(
3805 base::TimeDelta::FromMilliseconds(1)));
3806 demuxer_->SetDuration(0.001);
3808 EXPECT_CALL(host_, SetDuration(
3809 base::TimeDelta::FromMilliseconds(160)));
3810 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3811 "0K 20K 40K 60K 80K 100K 120K 140K");
3813 CheckExpectedRanges(kSourceId, "{ [0,160) }");
3814 CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
3816 demuxer_->Remove(kSourceId,
3817 base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
3818 kInfiniteDuration());
3820 Seek(base::TimeDelta());
3821 CheckExpectedRanges(kSourceId, "{ [0,160) }");
3822 CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
3825 // Verifies that a Seek() will complete without text cues for
3826 // the seek point and will return cues after the seek position
3827 // when they are eventually appended.
3828 TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
3829 DemuxerStream* text_stream = NULL;
3830 EXPECT_CALL(host_, AddTextStream(_, _))
3831 .WillOnce(SaveArg<0>(&text_stream));
3832 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3834 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3835 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3837 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
3838 bool seek_cb_was_called = false;
3839 demuxer_->StartWaitingForSeek(seek_time);
3840 demuxer_->Seek(seek_time,
3841 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
3842 message_loop_.RunUntilIdle();
3844 EXPECT_FALSE(seek_cb_was_called);
3846 bool text_read_done = false;
3847 text_stream->Read(base::Bind(&OnReadDone,
3848 base::TimeDelta::FromMilliseconds(225),
3849 &text_read_done));
3851 // Append audio & video data so the seek completes.
3852 AppendMuxedCluster(
3853 MuxedStreamInfo(kAudioTrackNum,
3854 "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
3855 MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));
3857 message_loop_.RunUntilIdle();
3858 EXPECT_TRUE(seek_cb_was_called);
3859 EXPECT_FALSE(text_read_done);
3861 // Read some audio & video buffers to further verify seek completion.
3862 CheckExpectedBuffers(audio_stream, "120K 140K");
3863 CheckExpectedBuffers(video_stream, "120K 150");
3865 EXPECT_FALSE(text_read_done);
3867 // Append text cues that start after the seek point and verify that
3868 // they are returned by Read() calls.
3869 AppendMuxedCluster(
3870 MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
3871 MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
3872 MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));
3874 message_loop_.RunUntilIdle();
3875 EXPECT_TRUE(text_read_done);
3877 // NOTE: we start at 275 here because the buffer at 225 was returned
3878 // to the pending read initiated above.
3879 CheckExpectedBuffers(text_stream, "275K 325K");
3881 // Verify that audio & video streams continue to return expected values.
3882 CheckExpectedBuffers(audio_stream, "160K 180K");
3883 CheckExpectedBuffers(video_stream, "180 210");
3886 TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
3887 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3889 AppendCluster(GenerateCluster(0, 0, 4, true));
3890 CheckExpectedRanges(kSourceId, "{ [0,46) }");
3892 // A new cluster indicates the end of the previous cluster with unknown size.
3893 AppendCluster(GenerateCluster(46, 66, 5, true));
3894 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3897 TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
3898 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3900 // Add two clusters separated by Cues in a single Append() call.
3901 scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
3902 std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
3903 data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
3904 cluster = GenerateCluster(46, 66, 5, true);
3905 data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
3906 AppendData(&*data.begin(), data.size());
3908 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3911 TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
3912 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3914 AppendCluster(GenerateCluster(0, 0, 4));
3915 AppendData(kCuesHeader, sizeof(kCuesHeader));
3916 AppendCluster(GenerateCluster(46, 66, 5));
3917 CheckExpectedRanges(kSourceId, "{ [0,115) }");
3920 TEST_F(ChunkDemuxerTest, EvictCodedFramesTest) {
3921 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3922 demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
3923 demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 15 * kBlockSize);
3924 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3925 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3927 const char* kAudioStreamInfo = "0K 40K 80K 120K 160K 200K 240K 280K";
3928 const char* kVideoStreamInfo = "0K 10 20K 30 40K 50 60K 70 80K 90 100K "
3929 "110 120K 130 140K";
3930 // Append 8 blocks (80 bytes) of data to audio stream and 15 blocks (150
3931 // bytes) to video stream.
3932 AppendMuxedCluster(
3933 MuxedStreamInfo(kAudioTrackNum, kAudioStreamInfo),
3934 MuxedStreamInfo(kVideoTrackNum, kVideoStreamInfo));
3935 CheckExpectedBuffers(audio_stream, kAudioStreamInfo);
3936 CheckExpectedBuffers(video_stream, kVideoStreamInfo);
3938 // If we want to append 80 more bytes of muxed a+v data and the current
3939 // playback position is 0, that will fail, because EvictCodedFrames won't
3940 // remove data after the current playback position.
3941 ASSERT_FALSE(demuxer_->EvictCodedFrames(kSourceId,
3942 base::TimeDelta::FromMilliseconds(0),
3943 80));
3944 // EvictCodedFrames has failed, so data should be unchanged.
3945 Seek(base::TimeDelta::FromMilliseconds(0));
3946 CheckExpectedBuffers(audio_stream, kAudioStreamInfo);
3947 CheckExpectedBuffers(video_stream, kVideoStreamInfo);
3949 // But if we pretend that the playback position has moved to 120ms, that allows
3950 // EvictCodedFrames to garbage-collect enough data to succeed.
3951 ASSERT_TRUE(demuxer_->EvictCodedFrames(kSourceId,
3952 base::TimeDelta::FromMilliseconds(120),
3953 80));
3955 Seek(base::TimeDelta::FromMilliseconds(0));
3956 // Audio stream had 8 buffers, video stream had 15. We told EvictCodedFrames
3957 // that the new data size is 8 blocks muxed, i.e. 80 bytes. Given the current
3958 // ratio of video to the total data size (15 : (8+15) ~= 0.65) the estimated
3959 // sizes of video and audio data in the new 80 byte chunk are 52 bytes for
3960 // video (80*0.65 = 52) and 28 bytes for audio (80 - 52).
3961 // Given these numbers, MSE GC will remove just one audio block (the current
3962 // audio size is 80 bytes and the new data is 28 bytes, so removing a single
3963 // 10-byte block keeps us under the 100-byte memory limit after the append:
3964 // 80 - 10 + 28 = 98).
3965 // For the video stream, 150 + 52 = 202 while the video limit is 150 bytes, so
3966 // at least 6 blocks (60 bytes) must be removed to stay under the limit.
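// Accordingly, one audio block (0K) and the first six video blocks (0K
// through 50) should be gone in the checks below.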
3967 CheckExpectedBuffers(audio_stream, "40K 80K 120K 160K 200K 240K 280K");
3968 CheckExpectedBuffers(video_stream, "60K 70 80K 90 100K 110 120K 130 140K");
3971 } // namespace media