media/filters/chunk_demuxer_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <algorithm>
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/mock_demuxer_host.h"
16 #include "media/base/test_data_util.h"
17 #include "media/base/test_helpers.h"
18 #include "media/filters/chunk_demuxer.h"
19 #include "media/formats/webm/cluster_builder.h"
20 #include "media/formats/webm/webm_constants.h"
21 #include "media/formats/webm/webm_crypto_helpers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
24 using ::testing::AnyNumber;
25 using ::testing::Exactly;
26 using ::testing::InSequence;
27 using ::testing::NotNull;
28 using ::testing::Return;
29 using ::testing::SaveArg;
30 using ::testing::SetArgumentPointee;
31 using ::testing::_;
33 namespace media {
35 const uint8 kTracksHeader[] = {
36 0x16, 0x54, 0xAE, 0x6B, // Tracks ID
37 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
38 };
40 // WebM Block bytes that represent a VP8 key frame.
41 const uint8 kVP8Keyframe[] = {
42 0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
43 };
45 // WebM Block bytes that represent a VP8 interframe.
46 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
48 const uint8 kCuesHeader[] = {
49 0x1C, 0x53, 0xBB, 0x6B, // Cues ID
50 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cues(size = 0)
51 };
53 const uint8 kEncryptedMediaInitData[] = {
54 0x68, 0xFE, 0xF9, 0xA1, 0xB3, 0x0D, 0x6B, 0x4D,
55 0xF2, 0x22, 0xB5, 0x0B, 0x4D, 0xE9, 0xE9, 0x95,
56 };
58 const int kTracksHeaderSize = sizeof(kTracksHeader);
59 const int kTracksSizeOffset = 4;
61 // The size field of the TrackEntry element in the test file "webm_vorbis_track_entry"
62 // starts at index 1 and spans 8 bytes.
63 const int kAudioTrackSizeOffset = 1;
64 const int kAudioTrackSizeWidth = 8;
65 const int kAudioTrackEntryHeaderSize =
66 kAudioTrackSizeOffset + kAudioTrackSizeWidth;
68 // The size field of the TrackEntry element in the test file "webm_vp8_track_entry"
69 // starts at index 1 and spans 8 bytes.
70 const int kVideoTrackSizeOffset = 1;
71 const int kVideoTrackSizeWidth = 8;
72 const int kVideoTrackEntryHeaderSize =
73 kVideoTrackSizeOffset + kVideoTrackSizeWidth;
75 const int kVideoTrackNum = 1;
76 const int kAudioTrackNum = 2;
77 const int kTextTrackNum = 3;
78 const int kAlternateTextTrackNum = 4;
80 const int kAudioBlockDuration = 23;
81 const int kVideoBlockDuration = 33;
82 const int kTextBlockDuration = 100;
83 const int kBlockSize = 10;
85 const char kSourceId[] = "SourceId";
86 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
87 const int kDefaultFirstClusterEndTimestamp = 66;
88 const int kDefaultSecondClusterEndTimestamp = 132;
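// These values follow from the 23 ms audio / 33 ms video block durations
// above: kDefaultFirstCluster() (defined below in ChunkDemuxerTest) ends with
// an audio block covering [23,46) and a video block covering [33,66), so the
// reported buffered range is "{ [0,46) }" and its last end timestamp is 66;
// kDefaultSecondCluster() likewise ends with a video block covering [99,132).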
90 base::TimeDelta kDefaultDuration() {
91 return base::TimeDelta::FromMilliseconds(201224);
92 }
94 // Writes an integer into |buffer| as a vint (variable-length integer) spanning 8 bytes.
95 // The data pointed to by |buffer| must be at least 8 bytes long.
96 // |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
97 static void WriteInt64(uint8* buffer, int64 number) {
98 DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
99 buffer[0] = 0x01;
100 int64 tmp = number;
101 for (int i = 7; i > 0; i--) {
102 buffer[i] = tmp & 0xff;
103 tmp >>= 8;
104 }
105 }
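// For example, WriteInt64(buf, 0x1234) yields the bytes
// 01 00 00 00 00 00 12 34: a 0x01 length marker followed by the value in
// big-endian order, the same 8-byte vint form used for the element sizes in
// kTracksHeader and kCuesHeader above.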
107 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
108 return arg.get() && !arg->end_of_stream() &&
109 arg->timestamp().InMilliseconds() == timestamp_in_ms;
112 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
114 static void OnReadDone(const base::TimeDelta& expected_time,
115 bool* called,
116 DemuxerStream::Status status,
117 const scoped_refptr<DecoderBuffer>& buffer) {
118 EXPECT_EQ(status, DemuxerStream::kOk);
119 EXPECT_EQ(expected_time, buffer->timestamp());
120 *called = true;
123 static void OnReadDone_AbortExpected(
124 bool* called, DemuxerStream::Status status,
125 const scoped_refptr<DecoderBuffer>& buffer) {
126 EXPECT_EQ(status, DemuxerStream::kAborted);
127 EXPECT_EQ(NULL, buffer.get());
128 *called = true;
131 static void OnReadDone_EOSExpected(bool* called,
132 DemuxerStream::Status status,
133 const scoped_refptr<DecoderBuffer>& buffer) {
134 EXPECT_EQ(status, DemuxerStream::kOk);
135 EXPECT_TRUE(buffer->end_of_stream());
136 *called = true;
139 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
140 EXPECT_EQ(status, PIPELINE_OK);
141 *called = true;
144 static void LogFunc(const std::string& str) { DVLOG(1) << str; }
146 class ChunkDemuxerTest : public ::testing::Test {
147 protected:
148 enum CodecsIndex {
149 AUDIO,
150 VIDEO,
151 MAX_CODECS_INDEX
154 // Default cluster to append first for simple tests.
155 scoped_ptr<Cluster> kDefaultFirstCluster() {
156 return GenerateCluster(0, 4);
159 // Default cluster to append after kDefaultFirstCluster()
160 // has been appended. This cluster starts with blocks that
161 // have timestamps consistent with the end times of the blocks
162 // in kDefaultFirstCluster() so that these two clusters represent
163 // a continuous region.
164 scoped_ptr<Cluster> kDefaultSecondCluster() {
165 return GenerateCluster(46, 66, 5);
168 ChunkDemuxerTest()
169 : append_window_end_for_next_append_(kInfiniteDuration()) {
170 init_segment_received_cb_ =
171 base::Bind(&ChunkDemuxerTest::InitSegmentReceived,
172 base::Unretained(this));
173 CreateNewDemuxer();
176 void CreateNewDemuxer() {
177 base::Closure open_cb =
178 base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
179 Demuxer::EncryptedMediaInitDataCB encrypted_media_init_data_cb = base::Bind(
180 &ChunkDemuxerTest::OnEncryptedMediaInitData, base::Unretained(this));
181 demuxer_.reset(new ChunkDemuxer(
182 open_cb, encrypted_media_init_data_cb, base::Bind(&LogFunc),
183 scoped_refptr<MediaLog>(new MediaLog()), true));
186 virtual ~ChunkDemuxerTest() {
187 ShutdownDemuxer();
190 void CreateInitSegment(int stream_flags,
191 bool is_audio_encrypted,
192 bool is_video_encrypted,
193 scoped_ptr<uint8[]>* buffer,
194 int* size) {
195 CreateInitSegmentInternal(
196 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
197 size);
200 void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
201 bool is_audio_encrypted,
202 bool is_video_encrypted,
203 scoped_ptr<uint8[]>* buffer,
204 int* size) {
205 DCHECK(stream_flags & HAS_TEXT);
206 CreateInitSegmentInternal(
207 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
208 size);
211 void CreateInitSegmentInternal(int stream_flags,
212 bool is_audio_encrypted,
213 bool is_video_encrypted,
214 scoped_ptr<uint8[]>* buffer,
215 bool use_alternate_text_track_id,
216 int* size) {
217 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
218 bool has_video = (stream_flags & HAS_VIDEO) != 0;
219 bool has_text = (stream_flags & HAS_TEXT) != 0;
220 scoped_refptr<DecoderBuffer> ebml_header;
221 scoped_refptr<DecoderBuffer> info;
222 scoped_refptr<DecoderBuffer> audio_track_entry;
223 scoped_refptr<DecoderBuffer> video_track_entry;
224 scoped_refptr<DecoderBuffer> audio_content_encodings;
225 scoped_refptr<DecoderBuffer> video_content_encodings;
226 scoped_refptr<DecoderBuffer> text_track_entry;
228 ebml_header = ReadTestDataFile("webm_ebml_element");
230 info = ReadTestDataFile("webm_info_element");
232 int tracks_element_size = 0;
234 if (has_audio) {
235 audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
236 tracks_element_size += audio_track_entry->data_size();
237 if (is_audio_encrypted) {
238 audio_content_encodings = ReadTestDataFile("webm_content_encodings");
239 tracks_element_size += audio_content_encodings->data_size();
243 if (has_video) {
244 video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
245 tracks_element_size += video_track_entry->data_size();
246 if (is_video_encrypted) {
247 video_content_encodings = ReadTestDataFile("webm_content_encodings");
248 tracks_element_size += video_content_encodings->data_size();
252 if (has_text) {
253 // TODO(matthewjheaney): create an abstraction to do
254 // this (http://crbug/321454).
255 // We need it to also handle the creation of multiple text tracks.
257 // This is the track entry for a text track,
258 // TrackEntry [AE], size=30
259 // TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
260 // TrackUID [73] [C5], size=1, value=3 (must remain constant for same
261 // track, even if TrackNum changes)
262 // TrackType [83], size=1, val=0x11
263 // CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
264 char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
265 "\x83\x81\x11\x86\x92"
266 "D_WEBVTT/SUBTITLES";
267 DCHECK_EQ(str[4], kTextTrackNum);
268 if (use_alternate_text_track_id)
269 str[4] = kAlternateTextTrackNum;
271 const int len = strlen(str);
272 DCHECK_EQ(len, 32);
273 const uint8* const buf = reinterpret_cast<const uint8*>(str);
274 text_track_entry = DecoderBuffer::CopyFrom(buf, len);
275 tracks_element_size += text_track_entry->data_size();
278 *size = ebml_header->data_size() + info->data_size() +
279 kTracksHeaderSize + tracks_element_size;
281 buffer->reset(new uint8[*size]);
283 uint8* buf = buffer->get();
284 memcpy(buf, ebml_header->data(), ebml_header->data_size());
285 buf += ebml_header->data_size();
287 memcpy(buf, info->data(), info->data_size());
288 buf += info->data_size();
290 memcpy(buf, kTracksHeader, kTracksHeaderSize);
291 WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
292 buf += kTracksHeaderSize;
294 // TODO(xhwang): Simplify this! Probably have test data files that contain
295 // ContentEncodings directly instead of trying to create one at run-time.
296 if (has_audio) {
297 memcpy(buf, audio_track_entry->data(),
298 audio_track_entry->data_size());
299 if (is_audio_encrypted) {
300 memcpy(buf + audio_track_entry->data_size(),
301 audio_content_encodings->data(),
302 audio_content_encodings->data_size());
303 WriteInt64(buf + kAudioTrackSizeOffset,
304 audio_track_entry->data_size() +
305 audio_content_encodings->data_size() -
306 kAudioTrackEntryHeaderSize);
307 buf += audio_content_encodings->data_size();
309 buf += audio_track_entry->data_size();
312 if (has_video) {
313 memcpy(buf, video_track_entry->data(),
314 video_track_entry->data_size());
315 if (is_video_encrypted) {
316 memcpy(buf + video_track_entry->data_size(),
317 video_content_encodings->data(),
318 video_content_encodings->data_size());
319 WriteInt64(buf + kVideoTrackSizeOffset,
320 video_track_entry->data_size() +
321 video_content_encodings->data_size() -
322 kVideoTrackEntryHeaderSize);
323 buf += video_content_encodings->data_size();
325 buf += video_track_entry->data_size();
328 if (has_text) {
329 memcpy(buf, text_track_entry->data(),
330 text_track_entry->data_size());
331 buf += text_track_entry->data_size();
335 ChunkDemuxer::Status AddId() {
336 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
339 ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
340 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
341 bool has_video = (stream_flags & HAS_VIDEO) != 0;
342 std::vector<std::string> codecs;
343 std::string type;
345 if (has_audio) {
346 codecs.push_back("vorbis");
347 type = "audio/webm";
350 if (has_video) {
351 codecs.push_back("vp8");
352 type = "video/webm";
355 if (!has_audio && !has_video) {
356 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
359 return demuxer_->AddId(source_id, type, codecs);
362 ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
363 std::vector<std::string> codecs;
364 std::string type = "video/mp2t";
365 codecs.push_back("mp4a.40.2");
366 codecs.push_back("avc1.640028");
367 return demuxer_->AddId(source_id, type, codecs);
370 void AppendData(const uint8* data, size_t length) {
371 AppendData(kSourceId, data, length);
374 void AppendCluster(const std::string& source_id,
375 scoped_ptr<Cluster> cluster) {
376 AppendData(source_id, cluster->data(), cluster->size());
379 void AppendCluster(scoped_ptr<Cluster> cluster) {
380 AppendCluster(kSourceId, cluster.Pass());
383 void AppendCluster(int timecode, int block_count) {
384 AppendCluster(GenerateCluster(timecode, block_count));
387 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
388 int timecode, int block_count) {
389 int block_duration = 0;
390 switch (track_number) {
391 case kVideoTrackNum:
392 block_duration = kVideoBlockDuration;
393 break;
394 case kAudioTrackNum:
395 block_duration = kAudioBlockDuration;
396 break;
397 case kTextTrackNum: // Fall-through.
398 case kAlternateTextTrackNum:
399 block_duration = kTextBlockDuration;
400 break;
402 ASSERT_NE(block_duration, 0);
403 int end_timecode = timecode + block_count * block_duration;
404 AppendCluster(source_id,
405 GenerateSingleStreamCluster(
406 timecode, end_timecode, track_number, block_duration));
409 struct BlockInfo {
410 BlockInfo()
411 : track_number(0),
412 timestamp_in_ms(0),
413 flags(0),
414 duration(0) {
417 BlockInfo(int tn, int ts, int f, int d)
418 : track_number(tn),
419 timestamp_in_ms(ts),
420 flags(f),
421 duration(d) {
424 int track_number;
425 int timestamp_in_ms;
426 int flags;
427 int duration;
429 bool operator< (const BlockInfo& rhs) const {
430 return timestamp_in_ms < rhs.timestamp_in_ms;
434 // |track_number| - The track number to assign to each parsed block.
435 // |block_descriptions| - A space delimited string of block info that
436 // is used to populate |blocks|. Each block info has a timestamp in
437 // milliseconds and optionally followed by a 'K' to indicate that a block
438 // should be marked as a key frame. For example "0K 30 60" should populate
439 // |blocks| with 3 BlockInfo objects: a key frame with timestamp 0 and 2
440 // non-key-frames at 30ms and 60ms.
441 void ParseBlockDescriptions(int track_number,
442 const std::string& block_descriptions,
443 std::vector<BlockInfo>* blocks) {
444 std::vector<std::string> timestamps;
445 base::SplitString(block_descriptions, ' ', &timestamps);
447 for (size_t i = 0; i < timestamps.size(); ++i) {
448 std::string timestamp_str = timestamps[i];
449 BlockInfo block_info;
450 block_info.track_number = track_number;
451 block_info.flags = 0;
452 block_info.duration = 0;
454 if (EndsWith(timestamp_str, "K", true)) {
455 block_info.flags = kWebMFlagKeyframe;
456 // Remove the "K" off of the token.
457 timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
459 CHECK(base::StringToInt(timestamp_str, &block_info.timestamp_in_ms));
461 if (track_number == kTextTrackNum ||
462 track_number == kAlternateTextTrackNum) {
463 block_info.duration = kTextBlockDuration;
464 ASSERT_EQ(kWebMFlagKeyframe, block_info.flags)
465 << "Text block with timestamp " << block_info.timestamp_in_ms
466 << " was not marked as a key frame."
467 << " All text blocks must be key frames";
470 if (track_number == kAudioTrackNum)
471 ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
473 blocks->push_back(block_info);
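// For example, ParseBlockDescriptions(kAudioTrackNum, "0K 23K", &blocks)
// appends two BlockInfo entries: {2, 0, kWebMFlagKeyframe, 0} and
// {2, 23, kWebMFlagKeyframe, 0}. Audio and text blocks must carry the 'K'
// suffix (text blocks also receive duration kTextBlockDuration), otherwise
// the asserts above fail.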
477 scoped_ptr<Cluster> GenerateCluster(const std::vector<BlockInfo>& blocks,
478 bool unknown_size) {
479 DCHECK_GT(blocks.size(), 0u);
480 ClusterBuilder cb;
482 std::vector<uint8> data(10);
483 for (size_t i = 0; i < blocks.size(); ++i) {
484 if (i == 0)
485 cb.SetClusterTimecode(blocks[i].timestamp_in_ms);
487 if (blocks[i].duration) {
488 if (blocks[i].track_number == kVideoTrackNum) {
489 AddVideoBlockGroup(&cb,
490 blocks[i].track_number, blocks[i].timestamp_in_ms,
491 blocks[i].duration, blocks[i].flags);
492 } else {
493 cb.AddBlockGroup(blocks[i].track_number, blocks[i].timestamp_in_ms,
494 blocks[i].duration, blocks[i].flags,
495 &data[0], data.size());
497 } else {
498 cb.AddSimpleBlock(blocks[i].track_number, blocks[i].timestamp_in_ms,
499 blocks[i].flags,
500 &data[0], data.size());
504 return unknown_size ? cb.FinishWithUnknownSize() : cb.Finish();
507 scoped_ptr<Cluster> GenerateCluster(
508 std::priority_queue<BlockInfo> block_queue,
509 bool unknown_size) {
510 std::vector<BlockInfo> blocks(block_queue.size());
511 for (size_t i = block_queue.size() - 1; !block_queue.empty(); --i) {
512 blocks[i] = block_queue.top();
513 block_queue.pop();
516 return GenerateCluster(blocks, unknown_size);
519 // |block_descriptions| - The block descriptions used to construct the
520 // cluster. See the documentation for ParseBlockDescriptions() for details on
521 // the string format.
522 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
523 const std::string& block_descriptions) {
524 std::vector<BlockInfo> blocks;
525 ParseBlockDescriptions(track_number, block_descriptions, &blocks);
526 AppendCluster(source_id, GenerateCluster(blocks, false));
529 struct MuxedStreamInfo {
530 MuxedStreamInfo()
531 : track_number(0),
532 block_descriptions("")
535 MuxedStreamInfo(int track_num, const char* block_desc)
536 : track_number(track_num),
537 block_descriptions(block_desc) {
540 int track_number;
541 // The block description passed to ParseBlockDescriptions().
542 // See the documentation for that method for details on the string format.
543 const char* block_descriptions;
546 void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
547 const MuxedStreamInfo& msi_2) {
548 std::vector<MuxedStreamInfo> msi(2);
549 msi[0] = msi_1;
550 msi[1] = msi_2;
551 AppendMuxedCluster(msi);
554 void AppendMuxedCluster(const MuxedStreamInfo& msi_1,
555 const MuxedStreamInfo& msi_2,
556 const MuxedStreamInfo& msi_3) {
557 std::vector<MuxedStreamInfo> msi(3);
558 msi[0] = msi_1;
559 msi[1] = msi_2;
560 msi[2] = msi_3;
561 AppendMuxedCluster(msi);
564 void AppendMuxedCluster(const std::vector<MuxedStreamInfo>& msi) {
565 std::priority_queue<BlockInfo> block_queue;
566 for (size_t i = 0; i < msi.size(); ++i) {
567 std::vector<BlockInfo> track_blocks;
568 ParseBlockDescriptions(msi[i].track_number, msi[i].block_descriptions,
569 &track_blocks);
571 for (size_t j = 0; j < track_blocks.size(); ++j)
572 block_queue.push(track_blocks[j]);
575 AppendCluster(kSourceId, GenerateCluster(block_queue, false));
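// For example (illustrative values), AppendMuxedCluster(
//     MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
//     MuxedStreamInfo(kVideoTrackNum, "5K 30"))
// appends one cluster whose blocks are interleaved in timestamp order
// (0, 5, 23, 30 ms), approximating a muxed WebM media segment; the
// priority_queue and BlockInfo::operator<() provide the ordering.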
578 void AppendData(const std::string& source_id,
579 const uint8* data, size_t length) {
580 EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
582 demuxer_->AppendData(source_id, data, length,
583 append_window_start_for_next_append_,
584 append_window_end_for_next_append_,
585 &timestamp_offset_map_[source_id],
586 init_segment_received_cb_);
589 void AppendDataInPieces(const uint8* data, size_t length) {
590 AppendDataInPieces(data, length, 7);
593 void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
594 const uint8* start = data;
595 const uint8* end = data + length;
596 while (start < end) {
597 size_t append_size = std::min(piece_size,
598 static_cast<size_t>(end - start));
599 AppendData(start, append_size);
600 start += append_size;
604 void AppendInitSegment(int stream_flags) {
605 AppendInitSegmentWithSourceId(kSourceId, stream_flags);
608 void AppendInitSegmentWithSourceId(const std::string& source_id,
609 int stream_flags) {
610 AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
613 void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
614 int stream_flags,
615 bool is_audio_encrypted,
616 bool is_video_encrypted) {
617 scoped_ptr<uint8[]> info_tracks;
618 int info_tracks_size = 0;
619 CreateInitSegment(stream_flags,
620 is_audio_encrypted, is_video_encrypted,
621 &info_tracks, &info_tracks_size);
622 AppendData(source_id, info_tracks.get(), info_tracks_size);
625 void AppendGarbage() {
626 // Fill up an array with gibberish.
627 int garbage_cluster_size = 10;
628 scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
629 for (int i = 0; i < garbage_cluster_size; ++i)
630 garbage_cluster[i] = i;
631 AppendData(garbage_cluster.get(), garbage_cluster_size);
634 void InitDoneCalled(PipelineStatus expected_status,
635 PipelineStatus status) {
636 EXPECT_EQ(status, expected_status);
639 void AppendEmptyCluster(int timecode) {
640 AppendCluster(GenerateEmptyCluster(timecode));
643 PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
644 PipelineStatus expected_status) {
645 if (expected_duration != kNoTimestamp())
646 EXPECT_CALL(host_, SetDuration(expected_duration));
647 return CreateInitDoneCB(expected_status);
650 PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
651 return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
652 base::Unretained(this),
653 expected_status);
656 enum StreamFlags {
657 HAS_AUDIO = 1 << 0,
658 HAS_VIDEO = 1 << 1,
659 HAS_TEXT = 1 << 2
662 bool InitDemuxer(int stream_flags) {
663 return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
666 bool InitDemuxerWithEncryptionInfo(
667 int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
669 PipelineStatus expected_status =
670 (stream_flags != 0) ? PIPELINE_OK : PIPELINE_ERROR_DECODE;
672 base::TimeDelta expected_duration = kNoTimestamp();
673 if (expected_status == PIPELINE_OK)
674 expected_duration = kDefaultDuration();
676 EXPECT_CALL(*this, DemuxerOpened());
678 // Adding expectation prior to CreateInitDoneCB() here because InSequence
679 // tests require init segment received before duration set. Also, only
680 // expect an init segment received callback if there is actually a track in
681 // it.
682 if (stream_flags != 0)
683 EXPECT_CALL(*this, InitSegmentReceived());
685 demuxer_->Initialize(
686 &host_, CreateInitDoneCB(expected_duration, expected_status), true);
688 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
689 return false;
691 AppendInitSegmentWithEncryptedInfo(
692 kSourceId, stream_flags,
693 is_audio_encrypted, is_video_encrypted);
694 return true;
697 bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
698 const std::string& video_id,
699 bool has_text) {
700 EXPECT_CALL(*this, DemuxerOpened());
701 demuxer_->Initialize(
702 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
704 if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
705 return false;
706 if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
707 return false;
709 int audio_flags = HAS_AUDIO;
710 int video_flags = HAS_VIDEO;
712 if (has_text) {
713 audio_flags |= HAS_TEXT;
714 video_flags |= HAS_TEXT;
717 EXPECT_CALL(*this, InitSegmentReceived());
718 AppendInitSegmentWithSourceId(audio_id, audio_flags);
719 EXPECT_CALL(*this, InitSegmentReceived());
720 AppendInitSegmentWithSourceId(video_id, video_flags);
721 return true;
724 bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
725 const std::string& video_id) {
726 return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
729 // Initializes the demuxer with data from 2 files with different
730 // decoder configurations. This is used to test the decoder config change
731 // logic.
733 // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
734 // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
735 // The resulting video stream returns data from each file for the following
736 // time ranges.
737 // bear-320x240.webm : [0-501) [801-2736)
738 // bear-640x360.webm : [527-793)
740 // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
741 // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
742 // The resulting audio stream returns data from each file for the following
743 // time ranges.
744 // bear-320x240.webm : [0-524) [779-2736)
745 // bear-640x360.webm : [527-759)
746 bool InitDemuxerWithConfigChangeData() {
747 scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
748 scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
750 EXPECT_CALL(*this, DemuxerOpened());
752 // Adding expectation prior to CreateInitDoneCB() here because InSequence
753 // tests require init segment received before duration set.
754 EXPECT_CALL(*this, InitSegmentReceived());
755 demuxer_->Initialize(
756 &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
757 PIPELINE_OK), true);
759 if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
760 return false;
762 // Append the whole bear1 file.
763 // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
764 // the files are fixed to have the correct duration in their init segments,
765 // and the CreateInitDoneCB() call, above, is fixed to used that duration.
766 // See http://crbug.com/354284.
767 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
768 AppendData(bear1->data(), bear1->data_size());
769 // Last audio frame has timestamp 2721 and duration 24 (estimated from max
770 // seen so far for audio track).
771 // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
772 // DefaultDuration for video track).
773 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
775 // Append initialization segment for bear2.
776 // Note: Offsets here and below are derived from
777 // media/test/data/bear-640x360-manifest.js and
778 // media/test/data/bear-320x240-manifest.js which were
779 // generated from media/test/data/bear-640x360.webm and
780 // media/test/data/bear-320x240.webm respectively.
781 EXPECT_CALL(*this, InitSegmentReceived());
782 AppendData(bear2->data(), 4340);
784 // Append a media segment that goes from [0.527000, 1.014000).
785 AppendData(bear2->data() + 55290, 18785);
786 CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
788 // Append initialization segment for bear1 & fill gap with [779-1197)
789 // segment.
790 EXPECT_CALL(*this, InitSegmentReceived());
791 AppendData(bear1->data(), 4370);
792 AppendData(bear1->data() + 72737, 28183);
793 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
795 MarkEndOfStream(PIPELINE_OK);
796 return true;
799 void ShutdownDemuxer() {
800 if (demuxer_) {
801 demuxer_->Shutdown();
802 message_loop_.RunUntilIdle();
806 void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
807 uint8 data[] = { 0x00 };
808 cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
811 scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
812 return GenerateCluster(timecode, timecode, block_count);
815 void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
816 int duration, int flags) {
817 const uint8* data =
818 (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
819 int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
820 sizeof(kVP8Interframe);
821 cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
824 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
825 int first_video_timecode,
826 int block_count) {
827 return GenerateCluster(first_audio_timecode, first_video_timecode,
828 block_count, false);
830 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
831 int first_video_timecode,
832 int block_count,
833 bool unknown_size) {
834 CHECK_GT(block_count, 0);
836 std::priority_queue<BlockInfo> block_queue;
838 if (block_count == 1) {
839 block_queue.push(BlockInfo(kAudioTrackNum,
840 first_audio_timecode,
841 kWebMFlagKeyframe,
842 kAudioBlockDuration));
843 return GenerateCluster(block_queue, unknown_size);
846 int audio_timecode = first_audio_timecode;
847 int video_timecode = first_video_timecode;
849 // Create simple blocks for everything except the last 2 blocks.
850 // The first video frame must be a key frame.
851 uint8 video_flag = kWebMFlagKeyframe;
852 for (int i = 0; i < block_count - 2; i++) {
853 if (audio_timecode <= video_timecode) {
854 block_queue.push(BlockInfo(kAudioTrackNum,
855 audio_timecode,
856 kWebMFlagKeyframe,
857 0));
858 audio_timecode += kAudioBlockDuration;
859 continue;
862 block_queue.push(BlockInfo(kVideoTrackNum,
863 video_timecode,
864 video_flag,
865 0));
866 video_timecode += kVideoBlockDuration;
867 video_flag = 0;
870 // Make the last 2 blocks BlockGroups so that they don't get delayed by the
871 // block duration calculation logic.
872 block_queue.push(BlockInfo(kAudioTrackNum,
873 audio_timecode,
874 kWebMFlagKeyframe,
875 kAudioBlockDuration));
876 block_queue.push(BlockInfo(kVideoTrackNum,
877 video_timecode,
878 video_flag,
879 kVideoBlockDuration));
881 return GenerateCluster(block_queue, unknown_size);
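// For example, GenerateCluster(0, 0, 4) produces audio blocks at 0 and 23 ms
// and video blocks at 0 and 33 ms, with the first video block a key frame and
// the final audio/video blocks written as BlockGroups carrying explicit
// durations; this matches what kDefaultFirstCluster() appends.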
884 scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
885 int end_timecode,
886 int track_number,
887 int block_duration) {
888 CHECK_GT(end_timecode, timecode);
890 std::vector<uint8> data(kBlockSize);
892 ClusterBuilder cb;
893 cb.SetClusterTimecode(timecode);
895 // Create simple blocks for everything except the last block.
896 while (timecode < (end_timecode - block_duration)) {
897 cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
898 &data[0], data.size());
899 timecode += block_duration;
902 if (track_number == kVideoTrackNum) {
903 AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
904 kWebMFlagKeyframe);
905 } else {
906 cb.AddBlockGroup(track_number, timecode, block_duration,
907 kWebMFlagKeyframe, &data[0], data.size());
910 return cb.Finish();
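// For example, GenerateSingleStreamCluster(0, 46, kAudioTrackNum, 23)
// produces two audio blocks at 0 ms and 23 ms, the final one written as a
// BlockGroup with an explicit 23 ms duration so its end time (46 ms) is
// unambiguous.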
913 void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
914 demuxer_->GetStream(type)->Read(read_cb);
915 message_loop_.RunUntilIdle();
918 void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
919 Read(DemuxerStream::AUDIO, read_cb);
922 void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
923 Read(DemuxerStream::VIDEO, read_cb);
926 void GenerateExpectedReads(int timecode, int block_count) {
927 GenerateExpectedReads(timecode, timecode, block_count);
930 void GenerateExpectedReads(int start_audio_timecode,
931 int start_video_timecode,
932 int block_count) {
933 CHECK_GT(block_count, 0);
935 if (block_count == 1) {
936 ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
937 return;
940 int audio_timecode = start_audio_timecode;
941 int video_timecode = start_video_timecode;
943 for (int i = 0; i < block_count; i++) {
944 if (audio_timecode <= video_timecode) {
945 ExpectRead(DemuxerStream::AUDIO, audio_timecode);
946 audio_timecode += kAudioBlockDuration;
947 continue;
950 ExpectRead(DemuxerStream::VIDEO, video_timecode);
951 video_timecode += kVideoBlockDuration;
955 void GenerateSingleStreamExpectedReads(int timecode,
956 int block_count,
957 DemuxerStream::Type type,
958 int block_duration) {
959 CHECK_GT(block_count, 0);
960 int stream_timecode = timecode;
962 for (int i = 0; i < block_count; i++) {
963 ExpectRead(type, stream_timecode);
964 stream_timecode += block_duration;
968 void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
969 GenerateSingleStreamExpectedReads(
970 timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
973 void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
974 GenerateSingleStreamExpectedReads(
975 timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
978 scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
979 ClusterBuilder cb;
980 cb.SetClusterTimecode(timecode);
981 return cb.Finish();
984 void CheckExpectedRanges(const std::string& expected) {
985 CheckExpectedRanges(kSourceId, expected);
988 void CheckExpectedRanges(const std::string& id,
989 const std::string& expected) {
990 CheckExpectedRanges(demuxer_->GetBufferedRanges(id), expected);
993 void CheckExpectedRanges(DemuxerStream::Type type,
994 const std::string& expected) {
995 ChunkDemuxerStream* stream =
996 static_cast<ChunkDemuxerStream*>(demuxer_->GetStream(type));
997 CheckExpectedRanges(stream->GetBufferedRanges(kDefaultDuration()),
998 expected);
1001 void CheckExpectedRanges(const Ranges<base::TimeDelta>& r,
1002 const std::string& expected) {
1003 std::stringstream ss;
1004 ss << "{ ";
1005 for (size_t i = 0; i < r.size(); ++i) {
1006 ss << "[" << r.start(i).InMilliseconds() << ","
1007 << r.end(i).InMilliseconds() << ") ";
1009 ss << "}";
1010 EXPECT_EQ(expected, ss.str());
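// For example, a stream buffered over [0,46) and [100,200) (in milliseconds)
// serializes as "{ [0,46) [100,200) }"; the expected-range literals used
// throughout these tests, such as kDefaultFirstClusterRange, follow this
// format.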
1013 MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
1014 const scoped_refptr<DecoderBuffer>&));
1016 void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
1017 scoped_refptr<DecoderBuffer>* buffer_out,
1018 DemuxerStream::Status status,
1019 const scoped_refptr<DecoderBuffer>& buffer) {
1020 *status_out = status;
1021 *buffer_out = buffer;
1024 void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
1025 DemuxerStream::Status* status,
1026 base::TimeDelta* last_timestamp) {
1027 DemuxerStream* stream = demuxer_->GetStream(type);
1028 scoped_refptr<DecoderBuffer> buffer;
1030 *last_timestamp = kNoTimestamp();
1031 do {
1032 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1033 base::Unretained(this), status, &buffer));
1034 base::MessageLoop::current()->RunUntilIdle();
1035 if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
1036 *last_timestamp = buffer->timestamp();
1037 } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
1040 void ExpectEndOfStream(DemuxerStream::Type type) {
1041 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
1042 demuxer_->GetStream(type)->Read(base::Bind(
1043 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1044 message_loop_.RunUntilIdle();
1047 void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
1048 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
1049 HasTimestamp(timestamp_in_ms)));
1050 demuxer_->GetStream(type)->Read(base::Bind(
1051 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1052 message_loop_.RunUntilIdle();
1055 void ExpectConfigChanged(DemuxerStream::Type type) {
1056 EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
1057 demuxer_->GetStream(type)->Read(base::Bind(
1058 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
1059 message_loop_.RunUntilIdle();
1062 void CheckExpectedBuffers(DemuxerStream* stream,
1063 const std::string& expected) {
1064 std::vector<std::string> timestamps;
1065 base::SplitString(expected, ' ', &timestamps);
1066 std::stringstream ss;
1067 for (size_t i = 0; i < timestamps.size(); ++i) {
1068 // Initialize status to kAborted since it's possible for Read() to return
1069 // without calling StoreStatusAndBuffer() if it doesn't have any buffers
1070 // left to return.
1071 DemuxerStream::Status status = DemuxerStream::kAborted;
1072 scoped_refptr<DecoderBuffer> buffer;
1073 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
1074 base::Unretained(this), &status, &buffer));
1075 base::MessageLoop::current()->RunUntilIdle();
1076 if (status != DemuxerStream::kOk || buffer->end_of_stream())
1077 break;
1079 if (i > 0)
1080 ss << " ";
1081 ss << buffer->timestamp().InMilliseconds();
1083 if (buffer->is_key_frame())
1084 ss << "K";
1086 // Handle preroll buffers.
1087 if (EndsWith(timestamps[i], "P", true)) {
1088 ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
1089 ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
1090 ss << "P";
1093 EXPECT_EQ(expected, ss.str());
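// |expected| is a space-delimited list of buffer timestamps in milliseconds,
// each optionally suffixed with 'K' for a key frame and 'P' for a preroll
// buffer, e.g. "0K 30 60K"; reading stops early on end of stream or a
// non-kOk status.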
1096 MOCK_METHOD1(Checkpoint, void(int id));
1098 struct BufferTimestamps {
1099 int video_time_ms;
1100 int audio_time_ms;
1102 static const int kSkip = -1;
1104 // Test parsing a WebM file.
1105 // |filename| - The name of the file in media/test/data to parse.
1106 // |timestamps| - The expected timestamps on the parsed buffers.
1107 // a timestamp of kSkip indicates that a Read() call for that stream
1108 // shouldn't be made on that iteration of the loop. If both streams have
1109 // a kSkip then the loop will terminate.
1110 bool ParseWebMFile(const std::string& filename,
1111 const BufferTimestamps* timestamps,
1112 const base::TimeDelta& duration) {
1113 return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
1116 bool ParseWebMFile(const std::string& filename,
1117 const BufferTimestamps* timestamps,
1118 const base::TimeDelta& duration,
1119 int stream_flags) {
1120 EXPECT_CALL(*this, DemuxerOpened());
1121 demuxer_->Initialize(
1122 &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
1124 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
1125 return false;
1127 // Read a WebM file into memory and send the data to the demuxer.
1128 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
1129 EXPECT_CALL(*this, InitSegmentReceived());
1130 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
1132 // Verify that the timestamps on the first few packets match what we
1133 // expect.
1134 for (size_t i = 0;
1135 (timestamps[i].audio_time_ms != kSkip ||
1136 timestamps[i].video_time_ms != kSkip);
1137 i++) {
1138 bool audio_read_done = false;
1139 bool video_read_done = false;
1141 if (timestamps[i].audio_time_ms != kSkip) {
1142 ReadAudio(base::Bind(&OnReadDone,
1143 base::TimeDelta::FromMilliseconds(
1144 timestamps[i].audio_time_ms),
1145 &audio_read_done));
1146 EXPECT_TRUE(audio_read_done);
1149 if (timestamps[i].video_time_ms != kSkip) {
1150 ReadVideo(base::Bind(&OnReadDone,
1151 base::TimeDelta::FromMilliseconds(
1152 timestamps[i].video_time_ms),
1153 &video_read_done));
1154 EXPECT_TRUE(video_read_done);
1158 return true;
1161 MOCK_METHOD0(DemuxerOpened, void());
1162 MOCK_METHOD2(OnEncryptedMediaInitData,
1163 void(const std::string& init_data_type,
1164 const std::vector<uint8>& init_data));
1166 MOCK_METHOD0(InitSegmentReceived, void(void));
1168 void Seek(base::TimeDelta seek_time) {
1169 demuxer_->StartWaitingForSeek(seek_time);
1170 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
1171 message_loop_.RunUntilIdle();
1174 void MarkEndOfStream(PipelineStatus status) {
1175 demuxer_->MarkEndOfStream(status);
1176 message_loop_.RunUntilIdle();
1179 bool SetTimestampOffset(const std::string& id,
1180 base::TimeDelta timestamp_offset) {
1181 if (demuxer_->IsParsingMediaSegment(id))
1182 return false;
1184 timestamp_offset_map_[id] = timestamp_offset;
1185 return true;
1188 base::MessageLoop message_loop_;
1189 MockDemuxerHost host_;
1191 scoped_ptr<ChunkDemuxer> demuxer_;
1192 ChunkDemuxer::InitSegmentReceivedCB init_segment_received_cb_;
1194 base::TimeDelta append_window_start_for_next_append_;
1195 base::TimeDelta append_window_end_for_next_append_;
1197 // Map of source id to timestamp offset to use for the next AppendData()
1198 // operation for that source id.
1199 std::map<std::string, base::TimeDelta> timestamp_offset_map_;
1201 private:
1202 DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
1205 TEST_F(ChunkDemuxerTest, Init) {
1206 // Test no streams, audio-only, video-only, and audio & video scenarios.
1207 // Audio and video streams can be encrypted or not encrypted.
1208 for (int i = 0; i < 16; i++) {
1209 bool has_audio = (i & 0x1) != 0;
1210 bool has_video = (i & 0x2) != 0;
1211 bool is_audio_encrypted = (i & 0x4) != 0;
1212 bool is_video_encrypted = (i & 0x8) != 0;
1214 // Skip invalid combinations: an absent stream cannot be marked encrypted.
1215 if ((!has_audio && is_audio_encrypted) ||
1216 (!has_video && is_video_encrypted)) {
1217 continue;
1220 CreateNewDemuxer();
1222 if (is_audio_encrypted || is_video_encrypted) {
1223 int need_key_count = (is_audio_encrypted ? 1 : 0) +
1224 (is_video_encrypted ? 1 : 0);
1225 EXPECT_CALL(*this, OnEncryptedMediaInitData(
1226 kWebMInitDataType,
1227 std::vector<uint8>(
1228 kEncryptedMediaInitData,
1229 kEncryptedMediaInitData +
1230 arraysize(kEncryptedMediaInitData))))
1231 .Times(Exactly(need_key_count));
1234 int stream_flags = 0;
1235 if (has_audio)
1236 stream_flags |= HAS_AUDIO;
1238 if (has_video)
1239 stream_flags |= HAS_VIDEO;
1241 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1242 stream_flags, is_audio_encrypted, is_video_encrypted));
1244 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1245 if (has_audio) {
1246 ASSERT_TRUE(audio_stream);
1248 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1249 EXPECT_EQ(kCodecVorbis, config.codec());
1250 EXPECT_EQ(32, config.bits_per_channel());
1251 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1252 EXPECT_EQ(44100, config.samples_per_second());
1253 EXPECT_TRUE(config.extra_data());
1254 EXPECT_GT(config.extra_data_size(), 0u);
1255 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1256 EXPECT_EQ(is_audio_encrypted,
1257 audio_stream->audio_decoder_config().is_encrypted());
1258 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1259 ->supports_partial_append_window_trimming());
1260 } else {
1261 EXPECT_FALSE(audio_stream);
1264 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1265 if (has_video) {
1266 EXPECT_TRUE(video_stream);
1267 EXPECT_EQ(is_video_encrypted,
1268 video_stream->video_decoder_config().is_encrypted());
1269 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1270 ->supports_partial_append_window_trimming());
1271 } else {
1272 EXPECT_FALSE(video_stream);
1275 ShutdownDemuxer();
1276 demuxer_.reset();
1280 // TODO(acolwell): Fold this test into Init tests since the tests are
1281 // almost identical.
1282 TEST_F(ChunkDemuxerTest, InitText) {
1283 // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
1284 // No encryption cases handled here.
1285 bool has_video = true;
1286 bool is_audio_encrypted = false;
1287 bool is_video_encrypted = false;
1288 for (int i = 0; i < 2; i++) {
1289 bool has_audio = (i & 0x1) != 0;
1291 CreateNewDemuxer();
1293 DemuxerStream* text_stream = NULL;
1294 TextTrackConfig text_config;
1295 EXPECT_CALL(host_, AddTextStream(_, _))
1296 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1297 SaveArg<1>(&text_config)));
1299 int stream_flags = HAS_TEXT;
1300 if (has_audio)
1301 stream_flags |= HAS_AUDIO;
1303 if (has_video)
1304 stream_flags |= HAS_VIDEO;
1306 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1307 stream_flags, is_audio_encrypted, is_video_encrypted));
1308 ASSERT_TRUE(text_stream);
1309 EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1310 EXPECT_EQ(kTextSubtitles, text_config.kind());
1311 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1312 ->supports_partial_append_window_trimming());
1314 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1315 if (has_audio) {
1316 ASSERT_TRUE(audio_stream);
1318 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1319 EXPECT_EQ(kCodecVorbis, config.codec());
1320 EXPECT_EQ(32, config.bits_per_channel());
1321 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1322 EXPECT_EQ(44100, config.samples_per_second());
1323 EXPECT_TRUE(config.extra_data());
1324 EXPECT_GT(config.extra_data_size(), 0u);
1325 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1326 EXPECT_EQ(is_audio_encrypted,
1327 audio_stream->audio_decoder_config().is_encrypted());
1328 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1329 ->supports_partial_append_window_trimming());
1330 } else {
1331 EXPECT_FALSE(audio_stream);
1334 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1335 if (has_video) {
1336 EXPECT_TRUE(video_stream);
1337 EXPECT_EQ(is_video_encrypted,
1338 video_stream->video_decoder_config().is_encrypted());
1339 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1340 ->supports_partial_append_window_trimming());
1341 } else {
1342 EXPECT_FALSE(video_stream);
1345 ShutdownDemuxer();
1346 demuxer_.reset();
1350 TEST_F(ChunkDemuxerTest, SingleTextTrackIdChange) {
1351 // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
1352 // segment in which the text track ID changes. Verify appended buffers before
1353 // and after the second init segment map to the same underlying track buffers.
1354 CreateNewDemuxer();
1355 DemuxerStream* text_stream = NULL;
1356 TextTrackConfig text_config;
1357 EXPECT_CALL(host_, AddTextStream(_, _))
1358 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1359 SaveArg<1>(&text_config)));
1360 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1361 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1362 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1363 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1364 ASSERT_TRUE(audio_stream);
1365 ASSERT_TRUE(video_stream);
1366 ASSERT_TRUE(text_stream);
1368 AppendMuxedCluster(
1369 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1370 MuxedStreamInfo(kVideoTrackNum, "0K 30"),
1371 MuxedStreamInfo(kTextTrackNum, "10K"));
1372 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1374 scoped_ptr<uint8[]> info_tracks;
1375 int info_tracks_size = 0;
1376 CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
1377 false, false,
1378 &info_tracks, &info_tracks_size);
1379 EXPECT_CALL(*this, InitSegmentReceived());
1380 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1381 append_window_start_for_next_append_,
1382 append_window_end_for_next_append_,
1383 &timestamp_offset_map_[kSourceId],
1384 init_segment_received_cb_);
1386 AppendMuxedCluster(
1387 MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1388 MuxedStreamInfo(kVideoTrackNum, "60K"),
1389 MuxedStreamInfo(kAlternateTextTrackNum, "45K"));
1391 CheckExpectedRanges(kSourceId, "{ [0,92) }");
1392 CheckExpectedBuffers(audio_stream, "0K 23K 46K 69K");
1393 CheckExpectedBuffers(video_stream, "0K 30 60K");
1394 CheckExpectedBuffers(text_stream, "10K 45K");
1396 ShutdownDemuxer();
1399 TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
1400 // Tests that non-key-frames following an init segment are allowed
1401 // and dropped, as expected if the initialization segment received
1402 // algorithm correctly sets the needs random access point flag to true for all
1403 // track buffers. Note that the first initialization segment is insufficient
1404 // to fully test this since needs random access point flag initializes to
1405 // true.
1406 CreateNewDemuxer();
1407 DemuxerStream* text_stream = NULL;
1408 EXPECT_CALL(host_, AddTextStream(_, _))
1409 .WillOnce(SaveArg<0>(&text_stream));
1410 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1411 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1412 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1413 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1414 ASSERT_TRUE(audio_stream && video_stream && text_stream);
1416 AppendMuxedCluster(
1417 MuxedStreamInfo(kAudioTrackNum, "23K"),
1418 MuxedStreamInfo(kVideoTrackNum, "0 30K"),
1419 MuxedStreamInfo(kTextTrackNum, "25K 40K"));
1420 CheckExpectedRanges(kSourceId, "{ [23,46) }");
1422 EXPECT_CALL(*this, InitSegmentReceived());
1423 AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
1424 AppendMuxedCluster(
1425 MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
1426 MuxedStreamInfo(kVideoTrackNum, "60 90K"),
1427 MuxedStreamInfo(kTextTrackNum, "80K 90K"));
1428 CheckExpectedRanges(kSourceId, "{ [23,92) }");
1430 CheckExpectedBuffers(audio_stream, "23K 46K 69K");
1431 CheckExpectedBuffers(video_stream, "30K 90K");
1432 CheckExpectedBuffers(text_stream, "25K 40K 80K 90K");
1435 // Make sure that the demuxer reports an error if Shutdown()
1436 // is called before all the initialization segments are appended.
1437 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1438 EXPECT_CALL(*this, DemuxerOpened());
1439 demuxer_->Initialize(
1440 &host_, CreateInitDoneCB(
1441 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1443 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1444 EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1446 EXPECT_CALL(*this, InitSegmentReceived());
1447 AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1449 ShutdownDemuxer();
1452 TEST_F(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1453 EXPECT_CALL(*this, DemuxerOpened());
1454 demuxer_->Initialize(
1455 &host_, CreateInitDoneCB(
1456 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1458 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1459 EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1461 EXPECT_CALL(host_, AddTextStream(_, _))
1462 .Times(Exactly(1));
1464 EXPECT_CALL(*this, InitSegmentReceived());
1465 AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1467 ShutdownDemuxer();
1470 // Verifies that all streams waiting for data receive an end of stream
1471 // buffer when Shutdown() is called.
1472 TEST_F(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1473 DemuxerStream* text_stream = NULL;
1474 EXPECT_CALL(host_, AddTextStream(_, _))
1475 .WillOnce(SaveArg<0>(&text_stream));
1476 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1478 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1479 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1481 bool audio_read_done = false;
1482 bool video_read_done = false;
1483 bool text_read_done = false;
1484 audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1485 video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1486 text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1487 message_loop_.RunUntilIdle();
1489 EXPECT_FALSE(audio_read_done);
1490 EXPECT_FALSE(video_read_done);
1491 EXPECT_FALSE(text_read_done);
1493 ShutdownDemuxer();
1495 EXPECT_TRUE(audio_read_done);
1496 EXPECT_TRUE(video_read_done);
1497 EXPECT_TRUE(text_read_done);
1500 // Test that Seek() completes successfully when the first cluster
1501 // arrives.
1502 TEST_F(ChunkDemuxerTest, AppendDataAfterSeek) {
1503 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1504 AppendCluster(kDefaultFirstCluster());
1506 InSequence s;
1508 EXPECT_CALL(*this, Checkpoint(1));
1510 Seek(base::TimeDelta::FromMilliseconds(46));
1512 EXPECT_CALL(*this, Checkpoint(2));
1514 Checkpoint(1);
1516 AppendCluster(kDefaultSecondCluster());
1518 message_loop_.RunUntilIdle();
1520 Checkpoint(2);
1523 // Test that parsing errors are handled for clusters appended after init.
1524 TEST_F(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1525 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1526 AppendCluster(kDefaultFirstCluster());
1528 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1529 AppendGarbage();
1532 // Test the case where a Seek() is requested while the parser
1533 // is in the middle of cluster. This is to verify that the parser
1534 // does not reset itself on a seek.
1535 TEST_F(ChunkDemuxerTest, SeekWhileParsingCluster) {
1536 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1538 InSequence s;
1540 scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1542 // Split the cluster into two appends at an arbitrary point near the end.
1543 int first_append_size = cluster_a->size() - 11;
1544 int second_append_size = cluster_a->size() - first_append_size;
1546 // Append the first part of the cluster.
1547 AppendData(cluster_a->data(), first_append_size);
1549 ExpectRead(DemuxerStream::AUDIO, 0);
1550 ExpectRead(DemuxerStream::VIDEO, 0);
1551 ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1553 Seek(base::TimeDelta::FromSeconds(5));
1555 // Append the rest of the cluster.
1556 AppendData(cluster_a->data() + first_append_size, second_append_size);
1558 // Append the new cluster and verify that only the blocks
1559 // in the new cluster are returned.
1560 AppendCluster(GenerateCluster(5000, 6));
1561 GenerateExpectedReads(5000, 6);
1564 // Test the case where AppendData() is called before Init().
1565 TEST_F(ChunkDemuxerTest, AppendDataBeforeInit) {
1566 scoped_ptr<uint8[]> info_tracks;
1567 int info_tracks_size = 0;
1568 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1569 false, false, &info_tracks, &info_tracks_size);
1570 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1571 append_window_start_for_next_append_,
1572 append_window_end_for_next_append_,
1573 &timestamp_offset_map_[kSourceId],
1574 init_segment_received_cb_);
1577 // Make sure Read() callbacks are dispatched with the proper data.
1578 TEST_F(ChunkDemuxerTest, Read) {
1579 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1581 AppendCluster(kDefaultFirstCluster());
1583 bool audio_read_done = false;
1584 bool video_read_done = false;
1585 ReadAudio(base::Bind(&OnReadDone,
1586 base::TimeDelta::FromMilliseconds(0),
1587 &audio_read_done));
1588 ReadVideo(base::Bind(&OnReadDone,
1589 base::TimeDelta::FromMilliseconds(0),
1590 &video_read_done));
1592 EXPECT_TRUE(audio_read_done);
1593 EXPECT_TRUE(video_read_done);
1596 TEST_F(ChunkDemuxerTest, OutOfOrderClusters) {
1597 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1598 AppendCluster(kDefaultFirstCluster());
1599 AppendCluster(GenerateCluster(10, 4));
1601 // Make sure that AppendCluster() does not fail with a cluster that has
1602 // overlaps with the previously appended cluster.
1603 AppendCluster(GenerateCluster(5, 4));
1605 // Verify that AppendData() can still accept more data.
1606 scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1607 demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1608 append_window_start_for_next_append_,
1609 append_window_end_for_next_append_,
1610 &timestamp_offset_map_[kSourceId],
1611 init_segment_received_cb_);
1614 TEST_F(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1615 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1616 AppendCluster(kDefaultFirstCluster());
1618 ClusterBuilder cb;
1620 // Test the case where block timecodes are not monotonically
1621 // increasing but stay above the cluster timecode.
1622 cb.SetClusterTimecode(5);
1623 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1624 AddSimpleBlock(&cb, kVideoTrackNum, 10);
1625 AddSimpleBlock(&cb, kAudioTrackNum, 7);
1626 AddSimpleBlock(&cb, kVideoTrackNum, 15);
1628 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1629 AppendCluster(cb.Finish());
1631 // Verify that AppendData() ignores data after the error.
1632 scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1633 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1634 append_window_start_for_next_append_,
1635 append_window_end_for_next_append_,
1636 &timestamp_offset_map_[kSourceId],
1637 init_segment_received_cb_);
1640 TEST_F(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1641 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1642 AppendCluster(kDefaultFirstCluster());
1644 ClusterBuilder cb;
1646 // Test timecodes going backwards and including values less than the cluster
1647 // timecode.
1648 cb.SetClusterTimecode(5);
1649 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1650 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1651 AddSimpleBlock(&cb, kAudioTrackNum, 3);
1652 AddSimpleBlock(&cb, kVideoTrackNum, 3);
1654 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1655 AppendCluster(cb.Finish());
1657 // Verify that AppendData() ignores data after the error.
1658 scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1659 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1660 append_window_start_for_next_append_,
1661 append_window_end_for_next_append_,
1662 &timestamp_offset_map_[kSourceId],
1663 init_segment_received_cb_);
1667 TEST_F(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1668 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1669 AppendCluster(kDefaultFirstCluster());
1671 ClusterBuilder cb;
1673 // Test monotonic increasing timestamps on a per stream
1674 // basis.
1675 cb.SetClusterTimecode(5);
1676 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1677 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1678 AddSimpleBlock(&cb, kAudioTrackNum, 4);
1679 AddSimpleBlock(&cb, kVideoTrackNum, 7);
1681 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1682 AppendCluster(cb.Finish());
1685 // Test the case where a cluster is passed to AppendCluster() before
1686 // INFO & TRACKS data.
1687 TEST_F(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1688 EXPECT_CALL(*this, DemuxerOpened());
1689 demuxer_->Initialize(
1690 &host_, NewExpectedStatusCB(PIPELINE_ERROR_DECODE), true);
1692 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1694 AppendCluster(GenerateCluster(0, 1));
1697 // Test cases where we get an MarkEndOfStream() call during initialization.
1698 TEST_F(ChunkDemuxerTest, EOSDuringInit) {
1699 EXPECT_CALL(*this, DemuxerOpened());
1700 demuxer_->Initialize(
1701 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1702 MarkEndOfStream(PIPELINE_OK);
1705 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1706 EXPECT_CALL(*this, DemuxerOpened());
1707 demuxer_->Initialize(
1708 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1710 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1712 CheckExpectedRanges("{ }");
1713 MarkEndOfStream(PIPELINE_OK);
1714 ShutdownDemuxer();
1715 CheckExpectedRanges("{ }");
1716 demuxer_->RemoveId(kSourceId);
1717 demuxer_.reset();
1720 TEST_F(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1721 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1723 CheckExpectedRanges("{ }");
1724 MarkEndOfStream(PIPELINE_OK);
1725 CheckExpectedRanges("{ }");
1728 TEST_F(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1729 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1731 AppendCluster(kDefaultFirstCluster());
1732 CheckExpectedRanges(kDefaultFirstClusterRange);
1734 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1735 MarkEndOfStream(PIPELINE_ERROR_DECODE);
1736 CheckExpectedRanges(kDefaultFirstClusterRange);
1739 TEST_F(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1740 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1742 AppendCluster(kDefaultFirstCluster());
1743 CheckExpectedRanges(kDefaultFirstClusterRange);
1745 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1746 MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1749 // Helper class to reduce duplicate code when testing end of stream
1750 // Read() behavior.
1751 class EndOfStreamHelper {
1752 public:
1753 explicit EndOfStreamHelper(Demuxer* demuxer)
1754 : demuxer_(demuxer),
1755 audio_read_done_(false),
1756 video_read_done_(false) {
1759 // Request a read on the audio and video streams.
1760 void RequestReads() {
1761 EXPECT_FALSE(audio_read_done_);
1762 EXPECT_FALSE(video_read_done_);
1764 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1765 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1767 audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1768 video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1769 base::MessageLoop::current()->RunUntilIdle();
1772 // Check whether the |audio_read_done_| and |video_read_done_| flags
1773 // match |expected|.
1774 void CheckIfReadDonesWereCalled(bool expected) {
1775 base::MessageLoop::current()->RunUntilIdle();
1776 EXPECT_EQ(expected, audio_read_done_);
1777 EXPECT_EQ(expected, video_read_done_);
1780 private:
1781 static void OnEndOfStreamReadDone(
1782 bool* called,
1783 DemuxerStream::Status status,
1784 const scoped_refptr<DecoderBuffer>& buffer) {
1785 EXPECT_EQ(status, DemuxerStream::kOk);
1786 EXPECT_TRUE(buffer->end_of_stream());
1787 *called = true;
1790 Demuxer* demuxer_;
1791 bool audio_read_done_;
1792 bool video_read_done_;
1794 DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1797 // Make sure that all pending reads for which we don't have media data get an
1798 // "end of stream" buffer when MarkEndOfStream() is called.
1799 TEST_F(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1800 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1802 AppendCluster(GenerateCluster(0, 2));
1804 bool audio_read_done_1 = false;
1805 bool video_read_done_1 = false;
1806 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1807 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1809 ReadAudio(base::Bind(&OnReadDone,
1810 base::TimeDelta::FromMilliseconds(0),
1811 &audio_read_done_1));
1812 ReadVideo(base::Bind(&OnReadDone,
1813 base::TimeDelta::FromMilliseconds(0),
1814 &video_read_done_1));
1815 message_loop_.RunUntilIdle();
1817 EXPECT_TRUE(audio_read_done_1);
1818 EXPECT_TRUE(video_read_done_1);
1820 end_of_stream_helper_1.RequestReads();
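// There is no more media data to satisfy these reads, so they stay pending
// until MarkEndOfStream() below resolves them with end-of-stream buffers.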
1822 EXPECT_CALL(host_, SetDuration(
1823 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1824 MarkEndOfStream(PIPELINE_OK);
1826 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1828 end_of_stream_helper_2.RequestReads();
1829 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1832 // Make sure that all Read() calls after we get a MarkEndOfStream()
1833 // call return an "end of stream" buffer.
1834 TEST_F(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1835 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1837 AppendCluster(GenerateCluster(0, 2));
1839 bool audio_read_done_1 = false;
1840 bool video_read_done_1 = false;
1841 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1842 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1843 EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1845 ReadAudio(base::Bind(&OnReadDone,
1846 base::TimeDelta::FromMilliseconds(0),
1847 &audio_read_done_1));
1848 ReadVideo(base::Bind(&OnReadDone,
1849 base::TimeDelta::FromMilliseconds(0),
1850 &video_read_done_1));
1852 end_of_stream_helper_1.RequestReads();
1854 EXPECT_TRUE(audio_read_done_1);
1855 EXPECT_TRUE(video_read_done_1);
1856 end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1858 EXPECT_CALL(host_, SetDuration(
1859 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1860 MarkEndOfStream(PIPELINE_OK);
1862 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1864 // Request a few more reads and make sure we immediately get
1865 // end of stream buffers.
1866 end_of_stream_helper_2.RequestReads();
1867 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1869 end_of_stream_helper_3.RequestReads();
1870 end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1873 TEST_F(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1874 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1876 AppendCluster(0, 10);
1877 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1878 MarkEndOfStream(PIPELINE_OK);
1880 // Start the first seek.
1881 Seek(base::TimeDelta::FromMilliseconds(20));
1883 // Simulate another seek being requested before the first
1884 // seek has finished prerolling.
1885 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1886 demuxer_->CancelPendingSeek(seek_time2);
1888 // Finish second seek.
1889 Seek(seek_time2);
1891 DemuxerStream::Status status;
1892 base::TimeDelta last_timestamp;
1894 // Make sure audio can reach end of stream.
1895 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1896 ASSERT_EQ(status, DemuxerStream::kOk);
1898 // Make sure video can reach end of stream.
1899 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1900 ASSERT_EQ(status, DemuxerStream::kOk);
1903 // Verify buffered range change behavior for audio/video/text tracks.
1904 TEST_F(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1905 DemuxerStream* text_stream = NULL;
1907 EXPECT_CALL(host_, AddTextStream(_, _))
1908 .WillOnce(SaveArg<0>(&text_stream));
1909 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
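// Note: in the MuxedStreamInfo block descriptors used below, each number is a
// block timestamp in milliseconds and a trailing 'K' marks a keyframe.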
1911 AppendMuxedCluster(
1912 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1913 MuxedStreamInfo(kAudioTrackNum, "0K 23K"));
1915 // Check expected ranges and verify that an empty text track does not
1916 // affect the expected ranges.
1917 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1919 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1920 MarkEndOfStream(PIPELINE_OK);
1922 // Check that the ranges now extend to the end-of-stream duration and that
1923 // the empty text track still does not affect them.
1924 CheckExpectedRanges(kSourceId, "{ [0,66) }");
1926 // Unmark end of stream state and verify that the ranges return to
1927 // their pre-"end of stream" values.
1928 demuxer_->UnmarkEndOfStream();
1929 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1931 // Add text track data and verify that the buffered ranges don't change
1932 // since the intersection of all the tracks doesn't change.
1933 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1934 AppendMuxedCluster(
1935 MuxedStreamInfo(kVideoTrackNum, "0K 33"),
1936 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
1937 MuxedStreamInfo(kTextTrackNum, "0K 100K"));
1938 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1940 // Mark end of stream and verify that text track data is reflected in
1941 // the new range.
1942 MarkEndOfStream(PIPELINE_OK);
1943 CheckExpectedRanges(kSourceId, "{ [0,200) }");
1946 // Make sure AppendData() will accept elements that span multiple calls.
1947 TEST_F(ChunkDemuxerTest, AppendingInPieces) {
1948 EXPECT_CALL(*this, DemuxerOpened());
1949 demuxer_->Initialize(
1950 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1952 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1954 scoped_ptr<uint8[]> info_tracks;
1955 int info_tracks_size = 0;
1956 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1957 false, false, &info_tracks, &info_tracks_size);
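// Concatenate the init segment and both clusters into a single buffer so that
// AppendDataInPieces() can split elements across append boundaries.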
1959 scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1960 scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1962 size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1963 scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1964 uint8* dst = buffer.get();
1965 memcpy(dst, info_tracks.get(), info_tracks_size);
1966 dst += info_tracks_size;
1968 memcpy(dst, cluster_a->data(), cluster_a->size());
1969 dst += cluster_a->size();
1971 memcpy(dst, cluster_b->data(), cluster_b->size());
1972 dst += cluster_b->size();
1974 EXPECT_CALL(*this, InitSegmentReceived());
1975 AppendDataInPieces(buffer.get(), buffer_size);
1977 GenerateExpectedReads(0, 9);
1980 TEST_F(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1981 struct BufferTimestamps buffer_timestamps[] = {
1982 {0, 0},
1983 {33, 3},
1984 {67, 6},
1985 {100, 9},
1986 {133, 12},
1987 {kSkip, kSkip},
1990 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1991 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1992 // have the correct duration in the init segment. See http://crbug.com/354284.
1993 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1995 ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1996 base::TimeDelta::FromMilliseconds(2744)));
1999 TEST_F(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
2000 struct BufferTimestamps buffer_timestamps[] = {
2001 {0, 0},
2002 {33, 3},
2003 {67, 6},
2004 {100, 9},
2005 {133, 12},
2006 {kSkip, kSkip},
2009 ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
2010 kInfiniteDuration()));
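// Live WebM recordings typically omit the duration from the init segment, so
// the parse is expected to report an infinite duration, and both streams
// should report live liveness below.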
2012 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2013 EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, audio->liveness());
2014 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2015 EXPECT_EQ(DemuxerStream::LIVENESS_LIVE, video->liveness());
2018 TEST_F(ChunkDemuxerTest, WebMFile_AudioOnly) {
2019 struct BufferTimestamps buffer_timestamps[] = {
2020 {kSkip, 0},
2021 {kSkip, 3},
2022 {kSkip, 6},
2023 {kSkip, 9},
2024 {kSkip, 12},
2025 {kSkip, kSkip},
2028 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
2029 // ParseWebMFile() call's expected duration, below, once the file is fixed to
2030 // have the correct duration in the init segment. See http://crbug.com/354284.
2031 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
2033 ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
2034 base::TimeDelta::FromMilliseconds(2744),
2035 HAS_AUDIO));
2038 TEST_F(ChunkDemuxerTest, WebMFile_VideoOnly) {
2039 struct BufferTimestamps buffer_timestamps[] = {
2040 {0, kSkip},
2041 {33, kSkip},
2042 {67, kSkip},
2043 {100, kSkip},
2044 {133, kSkip},
2045 {kSkip, kSkip},
2048 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
2049 // ParseWebMFile() call's expected duration, below, once the file is fixed to
2050 // have the correct duration in the init segment. See http://crbug.com/354284.
2051 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
2053 ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
2054 base::TimeDelta::FromMilliseconds(2703),
2055 HAS_VIDEO));
2058 TEST_F(ChunkDemuxerTest, WebMFile_AltRefFrames) {
2059 struct BufferTimestamps buffer_timestamps[] = {
2060 {0, 0},
2061 {33, 3},
2062 {33, 6},
2063 {67, 9},
2064 {100, 12},
2065 {kSkip, kSkip},
2068 ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
2069 base::TimeDelta::FromMilliseconds(2767)));
2072 // Verify that we output buffers before the entire cluster has been parsed.
2073 TEST_F(ChunkDemuxerTest, IncrementalClusterParsing) {
2074 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2075 AppendEmptyCluster(0);
2077 scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
2079 bool audio_read_done = false;
2080 bool video_read_done = false;
2081 ReadAudio(base::Bind(&OnReadDone,
2082 base::TimeDelta::FromMilliseconds(0),
2083 &audio_read_done));
2084 ReadVideo(base::Bind(&OnReadDone,
2085 base::TimeDelta::FromMilliseconds(0),
2086 &video_read_done));
2088 // Make sure the reads haven't completed yet.
2089 EXPECT_FALSE(audio_read_done);
2090 EXPECT_FALSE(video_read_done);
2092 // Append data one byte at a time until one or both reads complete.
2093 int i = 0;
2094 for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
2095 AppendData(cluster->data() + i, 1);
2096 message_loop_.RunUntilIdle();
2099 EXPECT_TRUE(audio_read_done || video_read_done);
2100 EXPECT_GT(i, 0);
2101 EXPECT_LT(i, cluster->size());
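// At least one read completed before the whole cluster was appended, which
// shows that buffers are emitted incrementally rather than per cluster.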
2103 audio_read_done = false;
2104 video_read_done = false;
2105 ReadAudio(base::Bind(&OnReadDone,
2106 base::TimeDelta::FromMilliseconds(23),
2107 &audio_read_done));
2108 ReadVideo(base::Bind(&OnReadDone,
2109 base::TimeDelta::FromMilliseconds(33),
2110 &video_read_done));
2112 // Make sure the reads haven't completed yet.
2113 EXPECT_FALSE(audio_read_done);
2114 EXPECT_FALSE(video_read_done);
2116 // Append the remaining data.
2117 ASSERT_LT(i, cluster->size());
2118 AppendData(cluster->data() + i, cluster->size() - i);
2120 message_loop_.RunUntilIdle();
2122 EXPECT_TRUE(audio_read_done);
2123 EXPECT_TRUE(video_read_done);
2126 TEST_F(ChunkDemuxerTest, ParseErrorDuringInit) {
2127 EXPECT_CALL(*this, DemuxerOpened());
2128 demuxer_->Initialize(
2129 &host_, CreateInitDoneCB(
2130 kNoTimestamp(), PIPELINE_ERROR_DECODE), true);
2132 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
2134 uint8 tmp = 0;
2135 demuxer_->AppendData(kSourceId, &tmp, 1,
2136 append_window_start_for_next_append_,
2137 append_window_end_for_next_append_,
2138 &timestamp_offset_map_[kSourceId],
2139 init_segment_received_cb_);
2142 TEST_F(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
2143 EXPECT_CALL(*this, DemuxerOpened());
2144 demuxer_->Initialize(
2145 &host_, CreateInitDoneCB(kNoTimestamp(),
2146 PIPELINE_ERROR_DECODE), true);
2148 std::vector<std::string> codecs(1);
2149 codecs[0] = "vorbis";
2150 ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs),
2151 ChunkDemuxer::kOk);
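// The init segment advertises both audio and video, but the source was added
// as audio-only, so initialization is expected to fail with the
// PIPELINE_ERROR_DECODE status set up above.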
2153 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2156 TEST_F(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
2157 EXPECT_CALL(*this, DemuxerOpened());
2158 demuxer_->Initialize(
2159 &host_, CreateInitDoneCB(kNoTimestamp(),
2160 PIPELINE_ERROR_DECODE), true);
2162 std::vector<std::string> codecs(1);
2163 codecs[0] = "vp8";
2164 ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs),
2165 ChunkDemuxer::kOk);
2167 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2170 TEST_F(ChunkDemuxerTest, MultipleHeaders) {
2171 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2173 AppendCluster(kDefaultFirstCluster());
2175 // Append another identical initialization segment.
2176 EXPECT_CALL(*this, InitSegmentReceived());
2177 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
2179 AppendCluster(kDefaultSecondCluster());
2181 GenerateExpectedReads(0, 9);
2184 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
2185 std::string audio_id = "audio1";
2186 std::string video_id = "video1";
2187 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2189 // Append audio and video data into separate source ids.
2190 AppendCluster(audio_id,
2191 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2192 GenerateAudioStreamExpectedReads(0, 4);
2193 AppendCluster(video_id,
2194 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2195 GenerateVideoStreamExpectedReads(0, 4);
2198 TEST_F(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2199 // TODO(matthewjheaney): Here and elsewhere, we need more tests
2200 // for inband text tracks (http://crbug/321455).
2202 std::string audio_id = "audio1";
2203 std::string video_id = "video1";
2205 EXPECT_CALL(host_, AddTextStream(_, _))
2206 .Times(Exactly(2));
2207 ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2209 // Append audio and video data into separate source ids.
2210 AppendCluster(audio_id,
2211 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2212 GenerateAudioStreamExpectedReads(0, 4);
2213 AppendCluster(video_id,
2214 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2215 GenerateVideoStreamExpectedReads(0, 4);
2218 TEST_F(ChunkDemuxerTest, AddIdFailures) {
2219 EXPECT_CALL(*this, DemuxerOpened());
2220 demuxer_->Initialize(
2221 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2223 std::string audio_id = "audio1";
2224 std::string video_id = "video1";
2226 ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2228 // Adding an id with audio/video should fail because we already added audio.
2229 ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2231 EXPECT_CALL(*this, InitSegmentReceived());
2232 AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2234 // Adding an id after append should fail.
2235 ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2238 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2239 TEST_F(ChunkDemuxerTest, RemoveId) {
2240 std::string audio_id = "audio1";
2241 std::string video_id = "video1";
2242 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2244 // Append audio and video data into separate source ids.
2245 AppendCluster(audio_id,
2246 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2247 AppendCluster(video_id,
2248 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2250 // Read() from audio should return normal buffers.
2251 GenerateAudioStreamExpectedReads(0, 4);
2253 // Remove the audio id.
2254 demuxer_->RemoveId(audio_id);
2256 // Read() from audio should return "end of stream" buffers.
2257 bool audio_read_done = false;
2258 ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2259 message_loop_.RunUntilIdle();
2260 EXPECT_TRUE(audio_read_done);
2262 // Read() from video should still return normal buffers.
2263 GenerateVideoStreamExpectedReads(0, 4);
2266 // Test that removing an ID immediately after adding it does not interfere with
2267 // quota for new IDs in the future.
2268 TEST_F(ChunkDemuxerTest, RemoveAndAddId) {
2269 std::string audio_id_1 = "audio1";
2270 ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
2271 demuxer_->RemoveId(audio_id_1);
2273 std::string audio_id_2 = "audio2";
2274 ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
2277 TEST_F(ChunkDemuxerTest, SeekCanceled) {
2278 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2280 // Append cluster at the beginning of the stream.
2281 AppendCluster(GenerateCluster(0, 4));
2283 // Seek to an unbuffered region.
2284 Seek(base::TimeDelta::FromSeconds(50));
2286 // Attempt to read in unbuffered area; should not fulfill the read.
2287 bool audio_read_done = false;
2288 bool video_read_done = false;
2289 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2290 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2291 EXPECT_FALSE(audio_read_done);
2292 EXPECT_FALSE(video_read_done);
2294 // Now cancel the pending seek, which should flush the reads with empty
2295 // buffers.
2296 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2297 demuxer_->CancelPendingSeek(seek_time);
2298 message_loop_.RunUntilIdle();
2299 EXPECT_TRUE(audio_read_done);
2300 EXPECT_TRUE(video_read_done);
2302 // A seek back to the buffered region should succeed.
2303 Seek(seek_time);
2304 GenerateExpectedReads(0, 4);
2307 TEST_F(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2308 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2310 // Append cluster at the beginning of the stream.
2311 AppendCluster(GenerateCluster(0, 4));
2313 // Start waiting for a seek.
2314 base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2315 base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2316 demuxer_->StartWaitingForSeek(seek_time1);
2318 // Now cancel the upcoming seek to an unbuffered region.
2319 demuxer_->CancelPendingSeek(seek_time2);
2320 demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2322 // Read requests should be fulfilled with empty buffers.
2323 bool audio_read_done = false;
2324 bool video_read_done = false;
2325 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2326 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2327 EXPECT_TRUE(audio_read_done);
2328 EXPECT_TRUE(video_read_done);
2330 // A seek back to the buffered region should succeed.
2331 Seek(seek_time2);
2332 GenerateExpectedReads(0, 4);
2335 // Test that Seek() successfully seeks to all source IDs.
2336 TEST_F(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2337 std::string audio_id = "audio1";
2338 std::string video_id = "video1";
2339 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2341 AppendCluster(
2342 audio_id,
2343 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2344 AppendCluster(
2345 video_id,
2346 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2348 // Read() should return buffers at 0.
2349 bool audio_read_done = false;
2350 bool video_read_done = false;
2351 ReadAudio(base::Bind(&OnReadDone,
2352 base::TimeDelta::FromMilliseconds(0),
2353 &audio_read_done));
2354 ReadVideo(base::Bind(&OnReadDone,
2355 base::TimeDelta::FromMilliseconds(0),
2356 &video_read_done));
2357 EXPECT_TRUE(audio_read_done);
2358 EXPECT_TRUE(video_read_done);
2360 // Seek to 3 (an unbuffered region).
2361 Seek(base::TimeDelta::FromSeconds(3));
2363 audio_read_done = false;
2364 video_read_done = false;
2365 ReadAudio(base::Bind(&OnReadDone,
2366 base::TimeDelta::FromSeconds(3),
2367 &audio_read_done));
2368 ReadVideo(base::Bind(&OnReadDone,
2369 base::TimeDelta::FromSeconds(3),
2370 &video_read_done));
2371 // Read()s should not return until after data is appended at the Seek point.
2372 EXPECT_FALSE(audio_read_done);
2373 EXPECT_FALSE(video_read_done);
2375 AppendCluster(audio_id,
2376 GenerateSingleStreamCluster(
2377 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2378 AppendCluster(video_id,
2379 GenerateSingleStreamCluster(
2380 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2382 message_loop_.RunUntilIdle();
2384 // Read() should return buffers at 3.
2385 EXPECT_TRUE(audio_read_done);
2386 EXPECT_TRUE(video_read_done);
2389 // Test that Seek() completes successfully when EndOfStream
2390 // is called before data is available for that seek point.
2391 // This scenario can occur when seeking past the end of stream
2392 // of either audio or video (or both).
2393 TEST_F(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2394 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2396 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2397 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2399 // Seeking past the end of video.
2400 // Note: audio data is available for that seek point.
2401 bool seek_cb_was_called = false;
2402 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2403 demuxer_->StartWaitingForSeek(seek_time);
2404 demuxer_->Seek(seek_time,
2405 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2406 message_loop_.RunUntilIdle();
2408 EXPECT_FALSE(seek_cb_was_called);
2410 EXPECT_CALL(host_, SetDuration(
2411 base::TimeDelta::FromMilliseconds(120)));
2412 MarkEndOfStream(PIPELINE_OK);
2413 message_loop_.RunUntilIdle();
2415 EXPECT_TRUE(seek_cb_was_called);
2417 ShutdownDemuxer();
2420 // Test that EndOfStream is ignored if it arrives during a pending seek
2421 // whose seek time is before some existing ranges.
2422 TEST_F(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2423 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2425 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2426 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2427 AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2428 AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2430 bool seek_cb_was_called = false;
2431 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2432 demuxer_->StartWaitingForSeek(seek_time);
2433 demuxer_->Seek(seek_time,
2434 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2435 message_loop_.RunUntilIdle();
2437 EXPECT_FALSE(seek_cb_was_called);
2439 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2440 MarkEndOfStream(PIPELINE_OK);
2441 message_loop_.RunUntilIdle();
2443 EXPECT_FALSE(seek_cb_was_called);
2445 demuxer_->UnmarkEndOfStream();
2447 AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2448 AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
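// The appended data covers the 160ms seek point, so the pending seek should
// now be able to complete.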
2450 message_loop_.RunUntilIdle();
2452 EXPECT_TRUE(seek_cb_was_called);
2454 ShutdownDemuxer();
2457 // Test ranges in an audio-only stream.
2458 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2459 EXPECT_CALL(*this, DemuxerOpened());
2460 demuxer_->Initialize(
2461 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2463 ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2464 EXPECT_CALL(*this, InitSegmentReceived());
2465 AppendInitSegment(HAS_AUDIO);
2467 // Test a simple cluster.
2468 AppendCluster(
2469 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2471 CheckExpectedRanges("{ [0,92) }");
2473 // Append a disjoint cluster to check for two separate ranges.
2474 AppendCluster(GenerateSingleStreamCluster(
2475 150, 219, kAudioTrackNum, kAudioBlockDuration));
2477 CheckExpectedRanges("{ [0,92) [150,219) }");
2480 // Test ranges in a video-only stream.
2481 TEST_F(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2482 EXPECT_CALL(*this, DemuxerOpened());
2483 demuxer_->Initialize(
2484 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2486 ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2487 EXPECT_CALL(*this, InitSegmentReceived());
2488 AppendInitSegment(HAS_VIDEO);
2490 // Test a simple cluster.
2491 AppendCluster(
2492 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2494 CheckExpectedRanges("{ [0,132) }");
2496 // Append a disjoint cluster to check for two separate ranges.
2497 AppendCluster(GenerateSingleStreamCluster(
2498 200, 299, kVideoTrackNum, kVideoBlockDuration));
2500 CheckExpectedRanges("{ [0,132) [200,299) }");
2503 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2504 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2506 // Audio: 0 -> 23
2507 // Video: 0 -> 33
2508 // Buffered Range: 0 -> 23
2509 // Audio block duration is smaller than video block duration,
2510 // so the buffered ranges should correspond to the audio blocks.
2511 AppendCluster(GenerateSingleStreamCluster(
2512 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2513 AppendCluster(GenerateSingleStreamCluster(
2514 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2516 CheckExpectedRanges("{ [0,23) }");
2518 // Audio: 300 -> 400
2519 // Video: 320 -> 420
2520 // Buffered Range: 320 -> 400 (end overlap)
2521 AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2522 AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2524 CheckExpectedRanges("{ [0,23) [320,400) }");
2526 // Audio: 520 -> 590
2527 // Video: 500 -> 570
2528 // Buffered Range: 520 -> 570 (front overlap)
2529 AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2530 AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2532 CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2534 // Audio: 720 -> 750
2535 // Video: 700 -> 770
2536 // Buffered Range: 720 -> 750 (complete overlap, audio)
2537 AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2538 AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2540 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2542 // Audio: 900 -> 970
2543 // Video: 920 -> 950
2544 // Buffered Range: 920 -> 950 (complete overlap, video)
2545 AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2546 AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2548 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2550 // Appending within buffered range should not affect buffered ranges.
2551 AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2552 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2554 // Appending to a single stream outside the buffered ranges should not
2555 // affect the buffered ranges.
2556 AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2557 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2560 TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2561 EXPECT_CALL(host_, AddTextStream(_, _));
2562 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2564 // Append audio & video data.
2565 AppendMuxedCluster(
2566 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2567 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2569 // Verify that a text track with no cues does not result in an empty buffered
2570 // range.
2571 CheckExpectedRanges("{ [0,46) }");
2573 // Add some text cues.
2574 AppendMuxedCluster(
2575 MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
2576 MuxedStreamInfo(kVideoTrackNum, "100K 133"),
2577 MuxedStreamInfo(kTextTrackNum, "100K 200K"));
2579 // Verify that the text cues are not reflected in the buffered ranges.
2580 CheckExpectedRanges("{ [0,46) [100,146) }");
2582 // Remove the buffered ranges.
2583 demuxer_->Remove(kSourceId, base::TimeDelta(),
2584 base::TimeDelta::FromMilliseconds(250));
2585 CheckExpectedRanges("{ }");
2588 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2589 // over-hanging tails at the end of the ranges as this is likely due to block
2590 // duration differences.
2591 TEST_F(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2592 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2594 AppendMuxedCluster(
2595 MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
2596 MuxedStreamInfo(kVideoTrackNum, "0K 33"));
2598 CheckExpectedRanges("{ [0,46) }");
2600 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2601 MarkEndOfStream(PIPELINE_OK);
2603 // Verify that the range extends to the end of the video data.
2604 CheckExpectedRanges("{ [0,66) }");
2606 // Verify that the range reverts to the intersection when end of stream
2607 // has been cancelled.
2608 demuxer_->UnmarkEndOfStream();
2609 CheckExpectedRanges("{ [0,46) }");
2611 // Append and remove data so that the 2 streams' end ranges do not overlap.
2613 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2614 AppendMuxedCluster(
2615 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2616 MuxedStreamInfo(kVideoTrackNum, "200K 233 266 299 332K 365"));
2618 // At this point, the per-stream ranges are as follows:
2619 // Audio: [0,46) [200,246)
2620 // Video: [0,66) [200,398)
2621 CheckExpectedRanges("{ [0,46) [200,246) }");
2623 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2624 base::TimeDelta::FromMilliseconds(300));
2626 // At this point, the per-stream ranges are as follows:
2627 // Audio: [0,46)
2628 // Video: [0,66) [332,398)
2629 CheckExpectedRanges("{ [0,46) }");
2631 AppendMuxedCluster(
2632 MuxedStreamInfo(kAudioTrackNum, "200K 223K"),
2633 MuxedStreamInfo(kVideoTrackNum, "200K 233"));
2635 // At this point, the per-stream ranges are as follows:
2636 // Audio: [0,46) [200,246)
2637 // Video: [0,66) [200,266) [332,398)
2638 // NOTE: The last ranges on each stream do not overlap in time.
2639 CheckExpectedRanges("{ [0,46) [200,246) }");
2641 MarkEndOfStream(PIPELINE_OK);
2643 // NOTE: The last range on each stream gets extended to the highest
2644 // end timestamp according to the spec. The last audio range gets extended
2645 // from [200,246) to [200,398) which is why the intersection results in the
2646 // middle range getting larger AND the new range appearing.
2647 CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
2650 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodes) {
2651 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2653 // Create a cluster where the video timecode begins 25ms after the audio.
2654 AppendCluster(GenerateCluster(0, 25, 8));
2656 Seek(base::TimeDelta::FromSeconds(0));
2657 GenerateExpectedReads(0, 25, 8);
2659 // Seek to 5 seconds.
2660 Seek(base::TimeDelta::FromSeconds(5));
2662 // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2663 // after the video.
2664 AppendCluster(GenerateCluster(5025, 5000, 8));
2665 GenerateExpectedReads(5025, 5000, 8);
2668 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2669 std::string audio_id = "audio1";
2670 std::string video_id = "video1";
2671 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2673 // Generate two streams where the video stream starts 5ms after the audio
2674 // stream and append them.
2675 AppendCluster(audio_id, GenerateSingleStreamCluster(
2676 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2677 AppendCluster(video_id, GenerateSingleStreamCluster(
2678 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2680 // Both streams should be able to fulfill a seek to 25.
2681 Seek(base::TimeDelta::FromMilliseconds(25));
2682 GenerateAudioStreamExpectedReads(25, 4);
2683 GenerateVideoStreamExpectedReads(30, 4);
2686 TEST_F(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2687 std::string audio_id = "audio1";
2688 std::string video_id = "video1";
2689 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2691 // Generate two streams where the video stream starts 10s after the audio
2692 // stream and append them.
2693 AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2694 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2695 AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2696 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2698 // Should not be able to fulfill a seek to 0.
2699 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2700 demuxer_->StartWaitingForSeek(seek_time);
2701 demuxer_->Seek(seek_time,
2702 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2703 ExpectRead(DemuxerStream::AUDIO, 0);
2704 ExpectEndOfStream(DemuxerStream::VIDEO);
2707 TEST_F(ChunkDemuxerTest, ClusterWithNoBuffers) {
2708 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2710 // Generate and append an empty cluster beginning at 0.
2711 AppendEmptyCluster(0);
2713 // Sanity check that data can be appended after this cluster correctly.
2714 AppendCluster(GenerateCluster(0, 2));
2715 ExpectRead(DemuxerStream::AUDIO, 0);
2716 ExpectRead(DemuxerStream::VIDEO, 0);
2719 TEST_F(ChunkDemuxerTest, CodecPrefixMatching) {
2720 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2722 #if defined(USE_PROPRIETARY_CODECS)
2723 expected = ChunkDemuxer::kOk;
2724 #endif
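// "avc1.*" is an H.264 codec string, so it is only accepted in builds with
// proprietary codecs enabled; hence the conditional expectation above.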
2726 std::vector<std::string> codecs;
2727 codecs.push_back("avc1.4D4041");
2729 EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs), expected);
2732 // Test codec IDs that are not compliant with RFC 6381 but have been
2733 // seen in the wild.
2734 TEST_F(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2735 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2737 #if defined(USE_PROPRIETARY_CODECS)
2738 expected = ChunkDemuxer::kOk;
2739 #endif
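// RFC 6381 would spell these as "mp4a.40.2" and "mp4a.40.5" (AAC-LC and
// HE-AAC); the zero-padded forms below should still be accepted when MP4
// audio is supported.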
2740 const char* codec_ids[] = {
2741 // GPAC places leading zeros on the audio object type.
2742 "mp4a.40.02",
2743 "mp4a.40.05"
2746 for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2747 std::vector<std::string> codecs;
2748 codecs.push_back(codec_ids[i]);
2750 ChunkDemuxer::Status result =
2751 demuxer_->AddId("source_id", "audio/mp4", codecs);
2753 EXPECT_EQ(result, expected)
2754 << "Fail to add codec_id '" << codec_ids[i] << "'";
2756 if (result == ChunkDemuxer::kOk)
2757 demuxer_->RemoveId("source_id");
2761 TEST_F(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2762 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2764 EXPECT_CALL(host_, SetDuration(_))
2765 .Times(AnyNumber());
2767 base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2768 base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2770 AppendCluster(kDefaultFirstCluster());
2771 AppendCluster(kDefaultSecondCluster());
2772 MarkEndOfStream(PIPELINE_OK);
2774 DemuxerStream::Status status;
2775 base::TimeDelta last_timestamp;
2777 // Verify that we can read audio & video to the end w/o problems.
2778 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2779 EXPECT_EQ(DemuxerStream::kOk, status);
2780 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2782 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2783 EXPECT_EQ(DemuxerStream::kOk, status);
2784 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2786 // Seek back to 0 and verify that we can read to the end again.
2787 Seek(base::TimeDelta::FromMilliseconds(0));
2789 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2790 EXPECT_EQ(DemuxerStream::kOk, status);
2791 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2793 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2794 EXPECT_EQ(DemuxerStream::kOk, status);
2795 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2798 TEST_F(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2799 EXPECT_CALL(*this, DemuxerOpened());
2800 demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2801 ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2802 ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2804 CheckExpectedRanges("audio", "{ }");
2805 CheckExpectedRanges("video", "{ }");
2808 // Test that Seek() completes successfully when the first cluster
2809 // arrives.
2810 TEST_F(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2811 InSequence s;
2813 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2815 AppendCluster(kDefaultFirstCluster());
2817 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2818 demuxer_->StartWaitingForSeek(seek_time);
2820 AppendCluster(kDefaultSecondCluster());
2821 EXPECT_CALL(host_, SetDuration(
2822 base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2823 MarkEndOfStream(PIPELINE_OK);
2825 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2827 GenerateExpectedReads(0, 4);
2828 GenerateExpectedReads(46, 66, 5);
2830 EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2831 end_of_stream_helper.RequestReads();
2832 end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2835 TEST_F(ChunkDemuxerTest, ConfigChange_Video) {
2836 InSequence s;
2838 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2840 DemuxerStream::Status status;
2841 base::TimeDelta last_timestamp;
2843 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2845 // Fetch initial video config and verify it matches what we expect.
2846 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2847 ASSERT_TRUE(video_config_1.IsValidConfig());
2848 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2849 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2851 ExpectRead(DemuxerStream::VIDEO, 0);
2853 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2855 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2856 EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2858 // Fetch the new decoder config.
2859 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2860 ASSERT_TRUE(video_config_2.IsValidConfig());
2861 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2862 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2864 ExpectRead(DemuxerStream::VIDEO, 527);
2866 // Read until the next config change.
2867 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2868 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2869 EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2871 // Get the new config and verify that it matches the first one.
2872 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2874 ExpectRead(DemuxerStream::VIDEO, 801);
2876 // Read until the end of the stream just to make sure there aren't any other
2877 // config changes.
2878 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2879 ASSERT_EQ(status, DemuxerStream::kOk);
2882 TEST_F(ChunkDemuxerTest, ConfigChange_Audio) {
2883 InSequence s;
2885 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2887 DemuxerStream::Status status;
2888 base::TimeDelta last_timestamp;
2890 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2892 // Fetch initial audio config and verify it matches what we expect.
2893 const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2894 ASSERT_TRUE(audio_config_1.IsValidConfig());
2895 EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2896 EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2898 ExpectRead(DemuxerStream::AUDIO, 0);
2900 // The first config change seen is from a splice frame representing an overlap
2901 // of buffers from config 1 by buffers from config 2.
2902 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2903 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2904 EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2906 // Fetch the new decoder config.
2907 const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2908 ASSERT_TRUE(audio_config_2.IsValidConfig());
2909 EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2910 EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2912 // The next config change is from a splice frame representing an overlap of
2913 // buffers from config 2 by buffers from config 1.
2914 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2915 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2916 EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2917 ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2919 // Read until the end of the stream just to make sure there aren't any other
2920 // config changes.
2921 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2922 ASSERT_EQ(status, DemuxerStream::kOk);
2923 EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2926 TEST_F(ChunkDemuxerTest, ConfigChange_Seek) {
2927 InSequence s;
2929 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2931 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2933 // Fetch initial video config and verify it matches what we expect.
2934 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2935 ASSERT_TRUE(video_config_1.IsValidConfig());
2936 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2937 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2939 ExpectRead(DemuxerStream::VIDEO, 0);
2941 // Seek to a location with a different config.
2942 Seek(base::TimeDelta::FromMilliseconds(527));
2944 // Verify that the config change is signalled.
2945 ExpectConfigChanged(DemuxerStream::VIDEO);
2947 // Fetch the new decoder config and verify it is what we expect.
2948 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2949 ASSERT_TRUE(video_config_2.IsValidConfig());
2950 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2951 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2953 // Verify that Read() will return a buffer now.
2954 ExpectRead(DemuxerStream::VIDEO, 527);
2956 // Seek back to the beginning and verify we get another config change.
2957 Seek(base::TimeDelta::FromMilliseconds(0));
2958 ExpectConfigChanged(DemuxerStream::VIDEO);
2959 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2960 ExpectRead(DemuxerStream::VIDEO, 0);
2962 // Seek to a location that requires a config change and then
2963 // seek to a new location that has the same configuration as
2964 // the start of the file without a Read() in the middle.
2965 Seek(base::TimeDelta::FromMilliseconds(527));
2966 Seek(base::TimeDelta::FromMilliseconds(801));
2968 // Verify that no config change is signalled.
2969 ExpectRead(DemuxerStream::VIDEO, 801);
2970 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2973 TEST_F(ChunkDemuxerTest, TimestampPositiveOffset) {
2974 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2976 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2977 AppendCluster(GenerateCluster(0, 2));
2979 Seek(base::TimeDelta::FromMilliseconds(30000));
2981 GenerateExpectedReads(30000, 2);
2984 TEST_F(ChunkDemuxerTest, TimestampNegativeOffset) {
2985 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2987 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2988 AppendCluster(GenerateCluster(1000, 2));
2990 GenerateExpectedReads(0, 2);
2993 TEST_F(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2994 std::string audio_id = "audio1";
2995 std::string video_id = "video1";
2996 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2998 ASSERT_TRUE(SetTimestampOffset(
2999 audio_id, base::TimeDelta::FromMilliseconds(-2500)));
3000 ASSERT_TRUE(SetTimestampOffset(
3001 video_id, base::TimeDelta::FromMilliseconds(-2500)));
3002 AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
3003 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
3004 AppendCluster(video_id, GenerateSingleStreamCluster(2500,
3005 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
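// With the -2500ms offset applied, blocks whose media timestamps start at
// 2500ms are presented starting at 0, which the reads below verify.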
3006 GenerateAudioStreamExpectedReads(0, 4);
3007 GenerateVideoStreamExpectedReads(0, 4);
3009 Seek(base::TimeDelta::FromMilliseconds(27300));
3011 ASSERT_TRUE(SetTimestampOffset(
3012 audio_id, base::TimeDelta::FromMilliseconds(27300)));
3013 ASSERT_TRUE(SetTimestampOffset(
3014 video_id, base::TimeDelta::FromMilliseconds(27300)));
3015 AppendCluster(audio_id, GenerateSingleStreamCluster(
3016 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
3017 AppendCluster(video_id, GenerateSingleStreamCluster(
3018 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
3019 GenerateVideoStreamExpectedReads(27300, 4);
3020 GenerateAudioStreamExpectedReads(27300, 4);
3023 TEST_F(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
3024 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3026 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
3027 // Append only part of the cluster data.
3028 AppendData(cluster->data(), cluster->size() - 13);
3030 // Confirm we're in the middle of parsing a media segment.
3031 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3033 demuxer_->Abort(kSourceId,
3034 append_window_start_for_next_append_,
3035 append_window_end_for_next_append_,
3036 &timestamp_offset_map_[kSourceId]);
3038 // After Abort(), parsing should no longer be in the middle of a media
3039 // segment.
3040 ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
3043 #if defined(USE_PROPRIETARY_CODECS)
3044 #if defined(ENABLE_MPEG2TS_STREAM_PARSER)
3045 TEST_F(ChunkDemuxerTest, EmitBuffersDuringAbort) {
3046 EXPECT_CALL(*this, DemuxerOpened());
3047 demuxer_->Initialize(
3048 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3049 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3051 // For info:
3052 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3053 // Video: first PES:
3054 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
3055 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
3056 // Audio: first PES:
3057 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
3058 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
3059 // Video: last PES:
3060 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
3061 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
3062 // Audio: last PES:
3063 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
3065 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3066 EXPECT_CALL(*this, InitSegmentReceived());
3067 AppendData(kSourceId, buffer->data(), buffer->data_size());
3069 // Confirm we're in the middle of parsing a media segment.
3070 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3072 // Aborting the Mpeg2 TS parser triggers the emission of the last video
3073 // buffer that is pending in the stream parser.
3074 Ranges<base::TimeDelta> range_before_abort =
3075 demuxer_->GetBufferedRanges(kSourceId);
3076 demuxer_->Abort(kSourceId,
3077 append_window_start_for_next_append_,
3078 append_window_end_for_next_append_,
3079 &timestamp_offset_map_[kSourceId]);
3080 Ranges<base::TimeDelta> range_after_abort =
3081 demuxer_->GetBufferedRanges(kSourceId);
3083 ASSERT_EQ(range_before_abort.size(), 1u);
3084 ASSERT_EQ(range_after_abort.size(), 1u);
3085 EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
3086 EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
3089 TEST_F(ChunkDemuxerTest, SeekCompleteDuringAbort) {
3090 EXPECT_CALL(*this, DemuxerOpened());
3091 demuxer_->Initialize(
3092 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
3093 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
3095 // For info:
3096 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
3097 // Video: first PES:
3098 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
3099 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
3100 // Audio: first PES:
3101 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
3102 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
3103 // Video: last PES:
3104 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
3105 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
3106 // Audio: last PES:
3107 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
3109 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
3110 EXPECT_CALL(*this, InitSegmentReceived());
3111 AppendData(kSourceId, buffer->data(), buffer->data_size());
3113 // Confirm we're in the middle of parsing a media segment.
3114 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
3116 // Seek to a time corresponding to buffers that will be emitted during the
3117 // abort.
3118 Seek(base::TimeDelta::FromMilliseconds(4110));
3120 // Aborting the Mpeg2 TS parser triggers the emission of the last video
3121 // buffer that is pending in the stream parser.
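// The pending seek targets a time near that final buffer, so the abort alone
// should allow the seek to complete.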
3122 demuxer_->Abort(kSourceId,
3123 append_window_start_for_next_append_,
3124 append_window_end_for_next_append_,
3125 &timestamp_offset_map_[kSourceId]);
3128 #endif
3129 #endif
3131 TEST_F(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
3132 const uint8 kBuffer[] = {
3133 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
3134 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
3136 0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 3 due to:)
3137 0xE7, 0x81, 0x02, // Cluster TIMECODE (value = 2)
3138 /* e.g. put some blocks here... */
3139 0x1A, 0x45, 0xDF, 0xA3, 0x8A, // EBMLHEADER (size = 10, not fully appended)
3142 // This array indicates expected return value of IsParsingMediaSegment()
3143 // following each incrementally appended byte in |kBuffer|.
3144 const bool kExpectedReturnValues[] = {
3145 false, false, false, false, true,
3146 true, true, false,
3148 false, false, false, false, true,
3149 true, true, true,
3151 true, true, true, true, false,
3154 static_assert(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
3155 "test arrays out of sync");
3156 static_assert(arraysize(kBuffer) == sizeof(kBuffer),
3157 "there should be one byte per index");
3159 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3161 for (size_t i = 0; i < sizeof(kBuffer); i++) {
3162 DVLOG(3) << "Appending and testing index " << i;
3163 AppendData(kBuffer + i, 1);
3164 bool expected_return_value = kExpectedReturnValues[i];
3165 EXPECT_EQ(expected_return_value,
3166 demuxer_->IsParsingMediaSegment(kSourceId));
3170 TEST_F(ChunkDemuxerTest, DurationChange) {
3171 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3172 const int kStreamDuration = kDefaultDuration().InMilliseconds();
3174 // Add data leading up to the currently set duration.
3175 AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
3176 kStreamDuration - kVideoBlockDuration,
3177 2));
3179 CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
3181 // Add data beginning at the currently set duration and expect a new duration
3182 // to be signaled. Note that the last video block will have a higher end
3183 // timestamp than the last audio block.
3184 const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
3185 EXPECT_CALL(host_, SetDuration(
3186 base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
3187 AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
3189 CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
3191 // Add more data to the end of each media type. Note that the last audio block
3192 // will have a higher end timestamp than the last video block.
3193 const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
3194 EXPECT_CALL(host_, SetDuration(
3195 base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
3196 AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
3197 kStreamDuration + kVideoBlockDuration,
3198 3));
3200 // See that the range has increased appropriately (but not to the full
3201 // duration of 201293, since there is not enough video appended for that).
3202 CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
3205 TEST_F(ChunkDemuxerTest, DurationChangeTimestampOffset) {
3206 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3207 ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
3208 EXPECT_CALL(host_, SetDuration(
3209 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
3210 kVideoBlockDuration * 2)));
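// The generated blocks start at timestamp 0, but the offset shifts them past
// the current duration, so appending them should raise the duration to the
// value expected above.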
3211 AppendCluster(GenerateCluster(0, 4));
3214 TEST_F(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
3215 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3217 AppendCluster(kDefaultFirstCluster());
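// Only one cluster has been appended, so marking end of stream should
// truncate the duration reported at init time down to the end of that
// cluster.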
3219 EXPECT_CALL(host_, SetDuration(
3220 base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
3221 MarkEndOfStream(PIPELINE_OK);
3225 TEST_F(ChunkDemuxerTest, ZeroLengthAppend) {
3226 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3227 AppendData(NULL, 0);
3230 TEST_F(ChunkDemuxerTest, AppendAfterEndOfStream) {
3231 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3233 EXPECT_CALL(host_, SetDuration(_))
3234 .Times(AnyNumber());
3236 AppendCluster(kDefaultFirstCluster());
3237 MarkEndOfStream(PIPELINE_OK);
3239 demuxer_->UnmarkEndOfStream();
3241 AppendCluster(kDefaultSecondCluster());
3242 MarkEndOfStream(PIPELINE_OK);
3245 // Test receiving a Shutdown() call before we get an Initialize()
3246 // call. This can happen if the video element gets destroyed before
3247 // the pipeline has a chance to initialize the demuxer.
3248 TEST_F(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3249 demuxer_->Shutdown();
3250 demuxer_->Initialize(
3251 &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3252 message_loop_.RunUntilIdle();
3255 // Verifies that signaling end of stream while stalled at a gap
3256 // boundary does not trigger end of stream buffers to be returned.
3257 TEST_F(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
3258 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3260 AppendCluster(0, 10);
3261 AppendCluster(300, 10);
3262 CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
3264 GenerateExpectedReads(0, 10);
3266 bool audio_read_done = false;
3267 bool video_read_done = false;
3268 ReadAudio(base::Bind(&OnReadDone,
3269 base::TimeDelta::FromMilliseconds(138),
3270 &audio_read_done));
3271 ReadVideo(base::Bind(&OnReadDone,
3272 base::TimeDelta::FromMilliseconds(138),
3273 &video_read_done));
3275 // Verify that the reads didn't complete.
3276 EXPECT_FALSE(audio_read_done);
3277 EXPECT_FALSE(video_read_done);
3279 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
3280 MarkEndOfStream(PIPELINE_OK);
3282 // Verify that the reads still haven't completed.
3283 EXPECT_FALSE(audio_read_done);
3284 EXPECT_FALSE(video_read_done);
3286 demuxer_->UnmarkEndOfStream();
3288 AppendCluster(138, 22);
3290 message_loop_.RunUntilIdle();
3292 CheckExpectedRanges(kSourceId, "{ [0,435) }");
3294 // Verify that the reads have completed.
3295 EXPECT_TRUE(audio_read_done);
3296 EXPECT_TRUE(video_read_done);
3298 // Read the rest of the buffers.
3299 GenerateExpectedReads(161, 171, 20);
3301 // Verify that reads block because the append cleared the end of stream state.
3302 audio_read_done = false;
3303 video_read_done = false;
3304 ReadAudio(base::Bind(&OnReadDone_EOSExpected,
3305 &audio_read_done));
3306 ReadVideo(base::Bind(&OnReadDone_EOSExpected,
3307 &video_read_done));
3309 // Verify that the reads don't complete.
3310 EXPECT_FALSE(audio_read_done);
3311 EXPECT_FALSE(video_read_done);
3313 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
3314 MarkEndOfStream(PIPELINE_OK);
3316 EXPECT_TRUE(audio_read_done);
3317 EXPECT_TRUE(video_read_done);
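// Verifies that a seek canceled during the initial preroll can be followed by
// a new seek that is satisfied by appended data.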
TEST_F(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Cancel preroll.
  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
  demuxer_->CancelPendingSeek(seek_time);

  // Initiate the seek to the new location.
  Seek(seek_time);

  // Append data to satisfy the seek.
  AppendCluster(seek_time.InMilliseconds(), 10);
}

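// Verifies that memory limits can be set per stream type and that garbage
// collection honors the separate audio and video limits.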
TEST_F(ChunkDemuxerTest, SetMemoryLimitType) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Set different memory limits for audio and video.
  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 10 * kBlockSize);
  demuxer_->SetMemoryLimits(DemuxerStream::VIDEO, 5 * kBlockSize);

  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(1000);

  // Append data at the start that can be garbage collected:
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 0, 10);
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum, 0, 5);

  CheckExpectedRanges(DemuxerStream::AUDIO, "{ [0,230) }");
  CheckExpectedRanges(DemuxerStream::VIDEO, "{ [0,165) }");

  // Seek so we can garbage collect the data appended above.
  Seek(seek_time);

  // Append data at seek_time.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time.InMilliseconds(), 10);
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            seek_time.InMilliseconds(), 5);

  // Verify that the old data, and nothing more, has been garbage collected.
  CheckExpectedRanges(DemuxerStream::AUDIO, "{ [1000,1230) }");
  CheckExpectedRanges(DemuxerStream::VIDEO, "{ [1000,1165) }");
}

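// Verifies that garbage collection triggered while a seek is pending does not
// remove the buffers needed to satisfy that seek.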
TEST_F(ChunkDemuxerTest, GCDuringSeek) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  demuxer_->SetMemoryLimits(DemuxerStream::AUDIO, 5 * kBlockSize);

  base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
  base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);

  // Initiate a seek to |seek_time1|.
  Seek(seek_time1);

  // Append data to satisfy the first seek request.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time1.InMilliseconds(), 5);
  CheckExpectedRanges(kSourceId, "{ [1000,1115) }");

  // Signal that the second seek is starting.
  demuxer_->StartWaitingForSeek(seek_time2);

  // Append data to satisfy the second seek. This append triggers
  // the garbage collection logic since we set the memory limit to
  // 5 blocks.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            seek_time2.InMilliseconds(), 5);

  // Verify that the buffers that cover |seek_time2| do not get
  // garbage collected.
  CheckExpectedRanges(kSourceId, "{ [500,615) }");

  // Complete the seek.
  demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));

  // Append more data and make sure that the blocks for |seek_time2|
  // don't get removed.
  //
  // NOTE: The current GC algorithm tries to preserve the GOP at the
  // current position as well as the last appended GOP. This is
  // why there are 2 ranges in the expectations.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
  CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
}

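// Verifies that video buffers outside the append window are filtered out and
// that appends resume at the next keyframe inside the window.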
TEST_F(ChunkDemuxerTest, AppendWindow_Video) {
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [50,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "0K 30 60 90 120K 150 180 210 240K 270 300 330K");

  // Verify that GOPs that start outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are not included.
  CheckExpectedRanges(kSourceId, "{ [120,270) }");
  CheckExpectedBuffers(stream, "120K 150 180 210 240K");

  // Extend the append window to [50,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that the added buffers start at the next
  // key frame.
  AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
                            "360 390 420K 450 480 510 540K 570 600 630K");
  CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
}

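// Verifies append window filtering and partial-frame trimming for audio.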
TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the append window to [50,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");

  // Verify that frames that end outside the window are not included
  // in the buffer. Also verify that buffers that start inside the
  // window and extend beyond the end of the window are not included.

  // The first 50ms of the range should be truncated since it overlaps
  // the start of the append window.
  CheckExpectedRanges(kSourceId, "{ [50,280) }");

  // The "50P" buffer is the "0" buffer marked for complete discard. The next
  // "50" buffer is the "30" buffer marked with 20ms of start discard.
  CheckExpectedBuffers(stream, "50KP 50K 60K 90K 120K 150K 180K 210K 240K");

  // Extend the append window to [50,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that a new range is created.
  AppendSingleStreamCluster(
      kSourceId, kAudioTrackNum,
      "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
  CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
}

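// Verifies the case where a single audio buffer overlaps both the start and
// the end of the append window.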
TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));

  // Set the append window to [10,20).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);

  // Append a cluster that starts before and ends after the append window.
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");

  // Verify the append is clipped to the append window.
  CheckExpectedRanges(kSourceId, "{ [10,20) }");
}

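// Verifies append window trimming when a real WebM audio file is appended in
// small pieces.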
TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_,
      CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
      true);
  ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));

  // Set the append window to [50,150).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);

  // Read a WebM file into memory and send the data to the demuxer. The chunk
  // size has been chosen carefully to ensure the preroll buffer used by the
  // partial append window trim must come from a previous Append() call.
  scoped_refptr<DecoderBuffer> buffer =
      ReadTestDataFile("bear-320x240-audio-only.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  AppendDataInPieces(buffer->data(), buffer->data_size(), 128);

  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  CheckExpectedBuffers(stream, "50KP 50K 62K 86K 109K 122K 125K 128K");
}

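// Verifies that an audio config change removes the preroll buffer left over
// from an append that fell entirely outside the append window.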
TEST_F(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
  EXPECT_CALL(*this, DemuxerOpened());
  demuxer_->Initialize(
      &host_,
      CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
      true);
  ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));

  // Set the append window such that the first file is completely before the
  // append window.
  // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
  // have the correct duration in their init segments, and the
  // CreateInitDoneCB() call, above, is fixed to use that duration. See
  // http://crbug.com/354284.
  const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
  append_window_start_for_next_append_ = duration_1;

  // Read a WebM file into memory and append the data.
  scoped_refptr<DecoderBuffer> buffer =
      ReadTestDataFile("bear-320x240-audio-only.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
  CheckExpectedRanges(kSourceId, "{ }");

  DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  AudioDecoderConfig config_1 = stream->audio_decoder_config();

  // Read in a second WebM with a different config and append the data.
  scoped_refptr<DecoderBuffer> buffer2 =
      ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
  EXPECT_CALL(*this, InitSegmentReceived());
  EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
  ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
  AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
  CheckExpectedRanges(kSourceId, "{ [2746,5519) }");

  Seek(duration_1);
  ExpectConfigChanged(DemuxerStream::AUDIO);
  ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
  CheckExpectedBuffers(stream, "2746K 2767K 2789K 2810K");
}

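// Verifies append window filtering for text cues muxed with video.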
TEST_F(ChunkDemuxerTest, AppendWindow_Text) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  // Set the append window to [20,280).
  append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);

  // Append a cluster that starts before and ends after the append
  // window.
  AppendMuxedCluster(
      MuxedStreamInfo(kVideoTrackNum,
                      "0K 30 60 90 120K 150 180 210 240K 270 300 330K"),
      MuxedStreamInfo(kTextTrackNum, "0K 100K 200K 300K"));

  // Verify that text cues that start outside the window are not included
  // in the buffer. Also verify that cues that extend beyond the
  // window are not included.
  CheckExpectedRanges(kSourceId, "{ [100,270) }");
  CheckExpectedBuffers(video_stream, "120K 150 180 210 240K");
  CheckExpectedBuffers(text_stream, "100K");

  // Extend the append window to [20,650).
  append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);

  // Append more data and verify that a new range is created.
  AppendMuxedCluster(
      MuxedStreamInfo(kVideoTrackNum,
                      "360 390 420K 450 480 510 540K 570 600 630K"),
      MuxedStreamInfo(kTextTrackNum, "400K 500K 600K 700K"));
  CheckExpectedRanges(kSourceId, "{ [100,270) [400,630) }");

  // Seek to the new range and verify that the expected buffers are returned.
  Seek(base::TimeDelta::FromMilliseconds(420));
  CheckExpectedBuffers(video_stream, "420K 450 480 510 540K 570 600");
  CheckExpectedBuffers(text_stream, "400K 500K");
}

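// Verifies that StartWaitingForSeek() can safely be called after a parse
// error.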
TEST_F(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
  EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
  AppendGarbage();
  base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
  demuxer_->StartWaitingForSeek(seek_time);
}

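// Verifies that Remove() clears appended audio, video, and text data and that
// new data appended over the removed range is returned by subsequent reads.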
TEST_F(ChunkDemuxerTest, Remove_AudioVideoText) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "0K 20K 40K 60K 80K 100K 120K 140K"),
      MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180"),
      MuxedStreamInfo(kTextTrackNum, "0K 100K 200K"));

  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
  CheckExpectedBuffers(video_stream, "0K 30 60 90 120K 150 180");
  CheckExpectedBuffers(text_stream, "0K 100K 200K");

  // Remove the buffers that were added.
  demuxer_->Remove(kSourceId, base::TimeDelta(),
                   base::TimeDelta::FromMilliseconds(300));

  // Verify that all the appended data has been removed.
  CheckExpectedRanges(kSourceId, "{ }");

  // Append new buffers that are clearly different than the original
  // ones and verify that only the new buffers are returned.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "1K 21K 41K 61K 81K 101K 121K 141K"),
      MuxedStreamInfo(kVideoTrackNum, "1K 31 61 91 121K 151 181"),
      MuxedStreamInfo(kTextTrackNum, "1K 101K 201K"));

  Seek(base::TimeDelta());
  CheckExpectedBuffers(audio_stream, "1K 21K 41K 61K 81K 101K 121K 141K");
  CheckExpectedBuffers(video_stream, "1K 31 61 91 121K 151 181");
  CheckExpectedBuffers(text_stream, "1K 101K 201K");
}

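// Verifies that a Remove() starting at the current duration does not remove
// any buffered data.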
TEST_F(ChunkDemuxerTest, Remove_StartAtDuration) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);

  // Set the duration to something small so that the append that
  // follows updates the duration to reflect the end of the appended data.
  EXPECT_CALL(host_, SetDuration(
      base::TimeDelta::FromMilliseconds(1)));
  demuxer_->SetDuration(0.001);

  EXPECT_CALL(host_, SetDuration(
      base::TimeDelta::FromMilliseconds(160)));
  AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
                            "0K 20K 40K 60K 80K 100K 120K 140K");

  CheckExpectedRanges(kSourceId, "{ [0,160) }");
  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");

  demuxer_->Remove(kSourceId,
                   base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
                   kInfiniteDuration());

  Seek(base::TimeDelta());
  CheckExpectedRanges(kSourceId, "{ [0,160) }");
  CheckExpectedBuffers(audio_stream, "0K 20K 40K 60K 80K 100K 120K 140K");
}

// Verifies that a Seek() will complete without text cues for
// the seek point and will return cues after the seek position
// when they are eventually appended.
TEST_F(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
  DemuxerStream* text_stream = NULL;
  EXPECT_CALL(host_, AddTextStream(_, _))
      .WillOnce(SaveArg<0>(&text_stream));
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));

  DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
  DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);

  base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
  bool seek_cb_was_called = false;
  demuxer_->StartWaitingForSeek(seek_time);
  demuxer_->Seek(seek_time,
                 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
  message_loop_.RunUntilIdle();

  EXPECT_FALSE(seek_cb_was_called);

  bool text_read_done = false;
  text_stream->Read(base::Bind(&OnReadDone,
                               base::TimeDelta::FromMilliseconds(225),
                               &text_read_done));

  // Append audio & video data so the seek completes.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum,
                      "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K 200K"),
      MuxedStreamInfo(kVideoTrackNum, "0K 30 60 90 120K 150 180 210"));

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(seek_cb_was_called);
  EXPECT_FALSE(text_read_done);

  // Read some audio & video buffers to further verify seek completion.
  CheckExpectedBuffers(audio_stream, "120K 140K");
  CheckExpectedBuffers(video_stream, "120K 150");

  EXPECT_FALSE(text_read_done);

  // Append text cues that start after the seek point and verify that
  // they are returned by Read() calls.
  AppendMuxedCluster(
      MuxedStreamInfo(kAudioTrackNum, "220K 240K 260K 280K"),
      MuxedStreamInfo(kVideoTrackNum, "240K 270 300 330"),
      MuxedStreamInfo(kTextTrackNum, "225K 275K 325K"));

  message_loop_.RunUntilIdle();
  EXPECT_TRUE(text_read_done);

  // NOTE: we start at 275 here because the buffer at 225 was returned
  // to the pending read initiated above.
  CheckExpectedBuffers(text_stream, "275K 325K");

  // Verify that audio & video streams continue to return expected values.
  CheckExpectedBuffers(audio_stream, "160K 180K");
  CheckExpectedBuffers(video_stream, "180 210");
}

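// Verifies that clusters with an unknown (unspecified) size are parsed
// correctly.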
TEST_F(ChunkDemuxerTest, ClusterWithUnknownSize) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(GenerateCluster(0, 0, 4, true));
  CheckExpectedRanges(kSourceId, "{ [0,46) }");

  // A new cluster indicates end of the previous cluster with unknown size.
  AppendCluster(GenerateCluster(46, 66, 5, true));
  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}

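// Verifies that a Cues element between two unknown-sized clusters is parsed
// correctly when appended in a single call.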
TEST_F(ChunkDemuxerTest, CuesBetweenClustersWithUnknownSize) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  // Add two clusters separated by Cues in a single Append() call.
  scoped_ptr<Cluster> cluster = GenerateCluster(0, 0, 4, true);
  std::vector<uint8> data(cluster->data(), cluster->data() + cluster->size());
  data.insert(data.end(), kCuesHeader, kCuesHeader + sizeof(kCuesHeader));
  cluster = GenerateCluster(46, 66, 5, true);
  data.insert(data.end(), cluster->data(), cluster->data() + cluster->size());
  AppendData(&*data.begin(), data.size());

  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}

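// Verifies that a Cues element appended between two complete clusters is
// parsed correctly.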
TEST_F(ChunkDemuxerTest, CuesBetweenClusters) {
  ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));

  AppendCluster(GenerateCluster(0, 0, 4));
  AppendData(kCuesHeader, sizeof(kCuesHeader));
  AppendCluster(GenerateCluster(46, 66, 5));
  CheckExpectedRanges(kSourceId, "{ [0,115) }");
}

}  // namespace media