media/filters/chunk_demuxer_unittest.cc (blob 294d7692535b9f9a7ae0b78c969a4cce7811867b)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <algorithm>
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/mock_demuxer_host.h"
16 #include "media/base/test_data_util.h"
17 #include "media/base/test_helpers.h"
18 #include "media/filters/chunk_demuxer.h"
19 #include "media/formats/webm/cluster_builder.h"
20 #include "media/formats/webm/webm_constants.h"
21 #include "media/formats/webm/webm_crypto_helpers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
24 using ::testing::AnyNumber;
25 using ::testing::Exactly;
26 using ::testing::InSequence;
27 using ::testing::NotNull;
28 using ::testing::Return;
29 using ::testing::SaveArg;
30 using ::testing::SetArgumentPointee;
31 using ::testing::Values;
32 using ::testing::_;
34 namespace media {
36 const uint8 kTracksHeader[] = {
37 0x16, 0x54, 0xAE, 0x6B, // Tracks ID
38 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
41 // WebM Block bytes that represent a VP8 keyframe.
42 const uint8 kVP8Keyframe[] = {
43 0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
46 // WebM Block bytes that represent a VP8 interframe.
47 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
49 const int kTracksHeaderSize = sizeof(kTracksHeader);
50 const int kTracksSizeOffset = 4;
52 // The size of the TrackEntry element in test file "webm_vorbis_track_entry" starts
53 // at index 1 and spans 8 bytes.
54 const int kAudioTrackSizeOffset = 1;
55 const int kAudioTrackSizeWidth = 8;
56 const int kAudioTrackEntryHeaderSize =
57 kAudioTrackSizeOffset + kAudioTrackSizeWidth;
59 // The size of the TrackEntry element in test file "webm_vp8_track_entry" starts at
60 // index 1 and spans 8 bytes.
61 const int kVideoTrackSizeOffset = 1;
62 const int kVideoTrackSizeWidth = 8;
63 const int kVideoTrackEntryHeaderSize =
64 kVideoTrackSizeOffset + kVideoTrackSizeWidth;
66 const int kVideoTrackNum = 1;
67 const int kAudioTrackNum = 2;
68 const int kTextTrackNum = 3;
69 const int kAlternateTextTrackNum = 4;
71 const int kAudioBlockDuration = 23;
72 const int kVideoBlockDuration = 33;
73 const int kTextBlockDuration = 100;
74 const int kBlockSize = 10;
76 const char kSourceId[] = "SourceId";
77 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
78 const int kDefaultFirstClusterEndTimestamp = 66;
79 const int kDefaultSecondClusterEndTimestamp = 132;
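// Illustrative note (derived from the block-duration constants above, not
// from demuxer output): kDefaultFirstCluster() below generates two audio
// blocks (0ms, 23ms) and two video blocks (0ms, 33ms), so audio is buffered
// up to 46ms and video up to 66ms. The buffered intersection reported is
// therefore "{ [0,46) }" (kDefaultFirstClusterRange), while the cluster's
// last frame ends at 66ms (kDefaultFirstClusterEndTimestamp).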
81 base::TimeDelta kDefaultDuration() {
82 return base::TimeDelta::FromMilliseconds(201224);
85 // Writes an integer into |buffer| in the form of a vint that spans 8 bytes.
86 // The data pointed to by |buffer| should be at least 8 bytes long.
87 // |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
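// For illustration (a sketch of the loop below, not used by the tests):
// WriteInt64(buf, 0x0102) produces the bytes
//   0x01 0x00 0x00 0x00 0x00 0x00 0x01 0x02
// i.e. a one-byte 0x01 size descriptor followed by the value in big-endian
// order across the remaining seven bytes.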
88 static void WriteInt64(uint8* buffer, int64 number) {
89 DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
90 buffer[0] = 0x01;
91 int64 tmp = number;
92 for (int i = 7; i > 0; i--) {
93 buffer[i] = tmp & 0xff;
94 tmp >>= 8;
98 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
99 return arg.get() && !arg->end_of_stream() &&
100 arg->timestamp().InMilliseconds() == timestamp_in_ms;
103 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
105 static void OnReadDone(const base::TimeDelta& expected_time,
106 bool* called,
107 DemuxerStream::Status status,
108 const scoped_refptr<DecoderBuffer>& buffer) {
109 EXPECT_EQ(status, DemuxerStream::kOk);
110 EXPECT_EQ(expected_time, buffer->timestamp());
111 *called = true;
114 static void OnReadDone_AbortExpected(
115 bool* called, DemuxerStream::Status status,
116 const scoped_refptr<DecoderBuffer>& buffer) {
117 EXPECT_EQ(status, DemuxerStream::kAborted);
118 EXPECT_EQ(NULL, buffer.get());
119 *called = true;
122 static void OnReadDone_EOSExpected(bool* called,
123 DemuxerStream::Status status,
124 const scoped_refptr<DecoderBuffer>& buffer) {
125 EXPECT_EQ(status, DemuxerStream::kOk);
126 EXPECT_TRUE(buffer->end_of_stream());
127 *called = true;
130 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
131 EXPECT_EQ(status, PIPELINE_OK);
132 *called = true;
135 static void LogFunc(const std::string& str) { DVLOG(1) << str; }
137 // Test parameter determines which coded frame processor is used to process
138 // appended data. If true, LegacyFrameProcessor is used. Otherwise, the new
139 // FrameProcessor is used.
140 class ChunkDemuxerTest : public ::testing::TestWithParam<bool> {
141 protected:
142 enum CodecsIndex {
143 AUDIO,
144 VIDEO,
145 MAX_CODECS_INDEX
148 // Default cluster to append first for simple tests.
149 scoped_ptr<Cluster> kDefaultFirstCluster() {
150 return GenerateCluster(0, 4);
153 // Default cluster to append after kDefaultFirstCluster()
154 // has been appended. This cluster starts with blocks that
155 // have timestamps consistent with the end times of the blocks
156 // in kDefaultFirstCluster() so that these two clusters represent
157 // a continuous region.
158 scoped_ptr<Cluster> kDefaultSecondCluster() {
159 return GenerateCluster(46, 66, 5);
162 ChunkDemuxerTest()
163 : append_window_end_for_next_append_(kInfiniteDuration()) {
164 use_legacy_frame_processor_ = GetParam();
165 CreateNewDemuxer();
168 void CreateNewDemuxer() {
169 base::Closure open_cb =
170 base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
171 Demuxer::NeedKeyCB need_key_cb =
172 base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
173 demuxer_.reset(
174 new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
177 virtual ~ChunkDemuxerTest() {
178 ShutdownDemuxer();
181 void CreateInitSegment(int stream_flags,
182 bool is_audio_encrypted,
183 bool is_video_encrypted,
184 scoped_ptr<uint8[]>* buffer,
185 int* size) {
186 CreateInitSegmentInternal(
187 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, false,
188 size);
191 void CreateInitSegmentWithAlternateTextTrackNum(int stream_flags,
192 bool is_audio_encrypted,
193 bool is_video_encrypted,
194 scoped_ptr<uint8[]>* buffer,
195 int* size) {
196 DCHECK(stream_flags & HAS_TEXT);
197 CreateInitSegmentInternal(
198 stream_flags, is_audio_encrypted, is_video_encrypted, buffer, true,
199 size);
202 void CreateInitSegmentInternal(int stream_flags,
203 bool is_audio_encrypted,
204 bool is_video_encrypted,
205 scoped_ptr<uint8[]>* buffer,
206 bool use_alternate_text_track_id,
207 int* size) {
208 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
209 bool has_video = (stream_flags & HAS_VIDEO) != 0;
210 bool has_text = (stream_flags & HAS_TEXT) != 0;
211 scoped_refptr<DecoderBuffer> ebml_header;
212 scoped_refptr<DecoderBuffer> info;
213 scoped_refptr<DecoderBuffer> audio_track_entry;
214 scoped_refptr<DecoderBuffer> video_track_entry;
215 scoped_refptr<DecoderBuffer> audio_content_encodings;
216 scoped_refptr<DecoderBuffer> video_content_encodings;
217 scoped_refptr<DecoderBuffer> text_track_entry;
219 ebml_header = ReadTestDataFile("webm_ebml_element");
221 info = ReadTestDataFile("webm_info_element");
223 int tracks_element_size = 0;
225 if (has_audio) {
226 audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
227 tracks_element_size += audio_track_entry->data_size();
228 if (is_audio_encrypted) {
229 audio_content_encodings = ReadTestDataFile("webm_content_encodings");
230 tracks_element_size += audio_content_encodings->data_size();
234 if (has_video) {
235 video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
236 tracks_element_size += video_track_entry->data_size();
237 if (is_video_encrypted) {
238 video_content_encodings = ReadTestDataFile("webm_content_encodings");
239 tracks_element_size += video_content_encodings->data_size();
243 if (has_text) {
244 // TODO(matthewjheaney): create an abstraction to do
245 // this (http://crbug/321454).
246 // We need it to also handle the creation of multiple text tracks.
248 // This is the track entry for a text track,
249 // TrackEntry [AE], size=30
250 // TrackNum [D7], size=1, val=3 (or 4 if use_alternate_text_track_id)
251 // TrackUID [73] [C5], size=1, value=3 (must remain constant for same
252 // track, even if TrackNum changes)
253 // TrackType [83], size=1, val=0x11
254 // CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
255 char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
256 "\x83\x81\x11\x86\x92"
257 "D_WEBVTT/SUBTITLES";
258 DCHECK_EQ(str[4], kTextTrackNum);
259 if (use_alternate_text_track_id)
260 str[4] = kAlternateTextTrackNum;
262 const int len = strlen(str);
263 DCHECK_EQ(len, 32);
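// (Sketch of the arithmetic for the reader: the three string fragments above
// contribute 9 + 5 + 18 = 32 bytes, which is what the DCHECK verifies.)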
264 const uint8* const buf = reinterpret_cast<const uint8*>(str);
265 text_track_entry = DecoderBuffer::CopyFrom(buf, len);
266 tracks_element_size += text_track_entry->data_size();
269 *size = ebml_header->data_size() + info->data_size() +
270 kTracksHeaderSize + tracks_element_size;
272 buffer->reset(new uint8[*size]);
274 uint8* buf = buffer->get();
275 memcpy(buf, ebml_header->data(), ebml_header->data_size());
276 buf += ebml_header->data_size();
278 memcpy(buf, info->data(), info->data_size());
279 buf += info->data_size();
281 memcpy(buf, kTracksHeader, kTracksHeaderSize);
282 WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
283 buf += kTracksHeaderSize;
285 // TODO(xhwang): Simplify this! Probably have test data files that contain
286 // ContentEncodings directly instead of trying to create one at run-time.
287 if (has_audio) {
288 memcpy(buf, audio_track_entry->data(),
289 audio_track_entry->data_size());
290 if (is_audio_encrypted) {
291 memcpy(buf + audio_track_entry->data_size(),
292 audio_content_encodings->data(),
293 audio_content_encodings->data_size());
294 WriteInt64(buf + kAudioTrackSizeOffset,
295 audio_track_entry->data_size() +
296 audio_content_encodings->data_size() -
297 kAudioTrackEntryHeaderSize);
298 buf += audio_content_encodings->data_size();
300 buf += audio_track_entry->data_size();
303 if (has_video) {
304 memcpy(buf, video_track_entry->data(),
305 video_track_entry->data_size());
306 if (is_video_encrypted) {
307 memcpy(buf + video_track_entry->data_size(),
308 video_content_encodings->data(),
309 video_content_encodings->data_size());
310 WriteInt64(buf + kVideoTrackSizeOffset,
311 video_track_entry->data_size() +
312 video_content_encodings->data_size() -
313 kVideoTrackEntryHeaderSize);
314 buf += video_content_encodings->data_size();
316 buf += video_track_entry->data_size();
319 if (has_text) {
320 memcpy(buf, text_track_entry->data(),
321 text_track_entry->data_size());
322 buf += text_track_entry->data_size();
326 ChunkDemuxer::Status AddId() {
327 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
330 ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
331 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
332 bool has_video = (stream_flags & HAS_VIDEO) != 0;
333 std::vector<std::string> codecs;
334 std::string type;
336 if (has_audio) {
337 codecs.push_back("vorbis");
338 type = "audio/webm";
341 if (has_video) {
342 codecs.push_back("vp8");
343 type = "video/webm";
346 if (!has_audio && !has_video) {
347 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
350 return demuxer_->AddId(source_id, type, codecs,
351 use_legacy_frame_processor_);
354 ChunkDemuxer::Status AddIdForMp2tSource(const std::string& source_id) {
355 std::vector<std::string> codecs;
356 std::string type = "video/mp2t";
357 codecs.push_back("mp4a.40.2");
358 codecs.push_back("avc1.640028");
359 return demuxer_->AddId(source_id, type, codecs,
360 use_legacy_frame_processor_);
363 void AppendData(const uint8* data, size_t length) {
364 AppendData(kSourceId, data, length);
367 void AppendCluster(const std::string& source_id,
368 scoped_ptr<Cluster> cluster) {
369 AppendData(source_id, cluster->data(), cluster->size());
372 void AppendCluster(scoped_ptr<Cluster> cluster) {
373 AppendCluster(kSourceId, cluster.Pass());
376 void AppendCluster(int timecode, int block_count) {
377 AppendCluster(GenerateCluster(timecode, block_count));
380 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
381 int timecode, int block_count) {
382 int block_duration = 0;
383 switch (track_number) {
384 case kVideoTrackNum:
385 block_duration = kVideoBlockDuration;
386 break;
387 case kAudioTrackNum:
388 block_duration = kAudioBlockDuration;
389 break;
390 case kTextTrackNum: // Fall-through.
391 case kAlternateTextTrackNum:
392 block_duration = kTextBlockDuration;
393 break;
395 ASSERT_NE(block_duration, 0);
396 int end_timecode = timecode + block_count * block_duration;
397 AppendCluster(source_id,
398 GenerateSingleStreamCluster(
399 timecode, end_timecode, track_number, block_duration));
402 // |cluster_description| - A space delimited string of buffer info that
403 // is used to construct a cluster. Each buffer info is a timestamp in
404 // milliseconds, optionally followed by a 'K' to indicate that the buffer
405 // should be marked as a keyframe. For example, "0K 30 60" should construct
406 // a cluster with 3 blocks: a keyframe with timestamp 0 and 2 non-keyframes
407 // at 30ms and 60ms.
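// As a rough sketch of how the description is consumed (see the loop below):
// each space-delimited token becomes one block, a trailing 'K' sets
// kWebMFlagKeyframe on that block, and the first token's timestamp is also
// used as the cluster timecode.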
408 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
409 const std::string& cluster_description) {
410 std::vector<std::string> timestamps;
411 base::SplitString(cluster_description, ' ', &timestamps);
413 ClusterBuilder cb;
414 std::vector<uint8> data(10);
415 for (size_t i = 0; i < timestamps.size(); ++i) {
416 std::string timestamp_str = timestamps[i];
417 int block_flags = 0;
418 if (EndsWith(timestamp_str, "K", true)) {
419 block_flags = kWebMFlagKeyframe;
420 // Remove the "K" off of the token.
421 timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
423 int timestamp_in_ms;
424 CHECK(base::StringToInt(timestamp_str, &timestamp_in_ms));
426 if (i == 0)
427 cb.SetClusterTimecode(timestamp_in_ms);
429 if (track_number == kTextTrackNum ||
430 track_number == kAlternateTextTrackNum) {
431 cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
432 block_flags, &data[0], data.size());
433 } else {
434 cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
435 &data[0], data.size());
438 AppendCluster(source_id, cb.Finish());
441 void AppendData(const std::string& source_id,
442 const uint8* data, size_t length) {
443 EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
445 // TODO(wolenetz): Test timestamp offset updating once "sequence" append
446 // mode processing is implemented. See http://crbug.com/249422.
447 demuxer_->AppendData(source_id, data, length,
448 append_window_start_for_next_append_,
449 append_window_end_for_next_append_,
450 &timestamp_offset_map_[source_id]);
453 void AppendDataInPieces(const uint8* data, size_t length) {
454 AppendDataInPieces(data, length, 7);
457 void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
458 const uint8* start = data;
459 const uint8* end = data + length;
460 while (start < end) {
461 size_t append_size = std::min(piece_size,
462 static_cast<size_t>(end - start));
463 AppendData(start, append_size);
464 start += append_size;
468 void AppendInitSegment(int stream_flags) {
469 AppendInitSegmentWithSourceId(kSourceId, stream_flags);
472 void AppendInitSegmentWithSourceId(const std::string& source_id,
473 int stream_flags) {
474 AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
477 void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
478 int stream_flags,
479 bool is_audio_encrypted,
480 bool is_video_encrypted) {
481 scoped_ptr<uint8[]> info_tracks;
482 int info_tracks_size = 0;
483 CreateInitSegment(stream_flags,
484 is_audio_encrypted, is_video_encrypted,
485 &info_tracks, &info_tracks_size);
486 AppendData(source_id, info_tracks.get(), info_tracks_size);
489 void AppendGarbage() {
490 // Fill up an array with gibberish.
491 int garbage_cluster_size = 10;
492 scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
493 for (int i = 0; i < garbage_cluster_size; ++i)
494 garbage_cluster[i] = i;
495 AppendData(garbage_cluster.get(), garbage_cluster_size);
498 void InitDoneCalled(PipelineStatus expected_status,
499 PipelineStatus status) {
500 EXPECT_EQ(status, expected_status);
503 void AppendEmptyCluster(int timecode) {
504 AppendCluster(GenerateEmptyCluster(timecode));
507 PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
508 PipelineStatus expected_status) {
509 if (expected_duration != kNoTimestamp())
510 EXPECT_CALL(host_, SetDuration(expected_duration));
511 return CreateInitDoneCB(expected_status);
514 PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
515 return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
516 base::Unretained(this),
517 expected_status);
520 enum StreamFlags {
521 HAS_AUDIO = 1 << 0,
522 HAS_VIDEO = 1 << 1,
523 HAS_TEXT = 1 << 2
526 bool InitDemuxer(int stream_flags) {
527 return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
530 bool InitDemuxerWithEncryptionInfo(
531 int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
533 PipelineStatus expected_status =
534 (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
536 base::TimeDelta expected_duration = kNoTimestamp();
537 if (expected_status == PIPELINE_OK)
538 expected_duration = kDefaultDuration();
540 EXPECT_CALL(*this, DemuxerOpened());
541 demuxer_->Initialize(
542 &host_, CreateInitDoneCB(expected_duration, expected_status), true);
544 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
545 return false;
547 AppendInitSegmentWithEncryptedInfo(
548 kSourceId, stream_flags,
549 is_audio_encrypted, is_video_encrypted);
550 return true;
553 bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
554 const std::string& video_id,
555 bool has_text) {
556 EXPECT_CALL(*this, DemuxerOpened());
557 demuxer_->Initialize(
558 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
560 if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
561 return false;
562 if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
563 return false;
565 int audio_flags = HAS_AUDIO;
566 int video_flags = HAS_VIDEO;
568 if (has_text) {
569 audio_flags |= HAS_TEXT;
570 video_flags |= HAS_TEXT;
573 AppendInitSegmentWithSourceId(audio_id, audio_flags);
574 AppendInitSegmentWithSourceId(video_id, video_flags);
575 return true;
578 bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
579 const std::string& video_id) {
580 return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
583 // Initializes the demuxer with data from 2 files with different
584 // decoder configurations. This is used to test the decoder config change
585 // logic.
587 // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
588 // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
589 // The resulting video stream returns data from each file for the following
590 // time ranges.
591 // bear-320x240.webm : [0-501) [801-2736)
592 // bear-640x360.webm : [527-793)
594 // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
595 // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
596 // The resulting audio stream returns data from each file for the following
597 // time ranges.
598 // bear-320x240.webm : [0-524) [779-2736)
599 // bear-640x360.webm : [527-759)
600 bool InitDemuxerWithConfigChangeData() {
601 scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
602 scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
604 EXPECT_CALL(*this, DemuxerOpened());
606 demuxer_->Initialize(
607 &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
608 PIPELINE_OK), true);
610 if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
611 return false;
613 // Append the whole bear1 file.
614 // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
615 // the files are fixed to have the correct duration in their init segments,
616 // and the CreateInitDoneCB() call, above, is fixed to used that duration.
617 // See http://crbug.com/354284.
618 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
619 AppendData(bear1->data(), bear1->data_size());
620 // Last audio frame has timestamp 2721 and duration 24 (estimated from max
621 // seen so far for audio track).
622 // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
623 // DefaultDuration for video track).
624 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
626 // Append initialization segment for bear2.
627 // Note: Offsets here and below are derived from
628 // media/test/data/bear-640x360-manifest.js and
629 // media/test/data/bear-320x240-manifest.js which were
630 // generated from media/test/data/bear-640x360.webm and
631 // media/test/data/bear-320x240.webm respectively.
632 AppendData(bear2->data(), 4340);
634 // Append a media segment that goes from [0.527000, 1.014000).
635 AppendData(bear2->data() + 55290, 18785);
636 CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
638 // Append initialization segment for bear1 & fill gap with [779-1197)
639 // segment.
640 AppendData(bear1->data(), 4370);
641 AppendData(bear1->data() + 72737, 28183);
642 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
644 MarkEndOfStream(PIPELINE_OK);
645 return true;
648 void ShutdownDemuxer() {
649 if (demuxer_) {
650 demuxer_->Shutdown();
651 message_loop_.RunUntilIdle();
655 void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
656 uint8 data[] = { 0x00 };
657 cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
660 scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
661 return GenerateCluster(timecode, timecode, block_count);
664 void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
665 int duration, int flags) {
666 const uint8* data =
667 (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
668 int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
669 sizeof(kVP8Interframe);
670 cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
673 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
674 int first_video_timecode,
675 int block_count) {
676 CHECK_GT(block_count, 0);
678 int size = 10;
679 scoped_ptr<uint8[]> data(new uint8[size]);
681 ClusterBuilder cb;
682 cb.SetClusterTimecode(std::min(first_audio_timecode, first_video_timecode));
684 if (block_count == 1) {
685 cb.AddBlockGroup(kAudioTrackNum, first_audio_timecode,
686 kAudioBlockDuration, kWebMFlagKeyframe,
687 data.get(), size);
688 return cb.Finish();
691 int audio_timecode = first_audio_timecode;
692 int video_timecode = first_video_timecode;
694 // Create simple blocks for everything except the last 2 blocks.
695 // The first video frame must be a keyframe.
696 uint8 video_flag = kWebMFlagKeyframe;
697 for (int i = 0; i < block_count - 2; i++) {
698 if (audio_timecode <= video_timecode) {
699 cb.AddSimpleBlock(kAudioTrackNum, audio_timecode, kWebMFlagKeyframe,
700 data.get(), size);
701 audio_timecode += kAudioBlockDuration;
702 continue;
705 cb.AddSimpleBlock(kVideoTrackNum, video_timecode, video_flag, data.get(),
706 size);
707 video_timecode += kVideoBlockDuration;
708 video_flag = 0;
711 // Make the last 2 blocks BlockGroups so that they don't get delayed by the
712 // block duration calculation logic.
713 if (audio_timecode <= video_timecode) {
714 cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
715 kWebMFlagKeyframe, data.get(), size);
716 AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
717 kVideoBlockDuration, video_flag);
718 } else {
719 AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
720 kVideoBlockDuration, video_flag);
721 cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
722 kWebMFlagKeyframe, data.get(), size);
725 return cb.Finish();
728 scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
729 int end_timecode,
730 int track_number,
731 int block_duration) {
732 CHECK_GT(end_timecode, timecode);
734 std::vector<uint8> data(kBlockSize);
736 ClusterBuilder cb;
737 cb.SetClusterTimecode(timecode);
739 // Create simple blocks for everything except the last block.
740 while (timecode < (end_timecode - block_duration)) {
741 cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
742 &data[0], data.size());
743 timecode += block_duration;
746 if (track_number == kVideoTrackNum) {
747 AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
748 kWebMFlagKeyframe);
749 } else {
750 cb.AddBlockGroup(track_number, timecode, block_duration,
751 kWebMFlagKeyframe, &data[0], data.size());
754 return cb.Finish();
757 void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
758 demuxer_->GetStream(type)->Read(read_cb);
759 message_loop_.RunUntilIdle();
762 void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
763 Read(DemuxerStream::AUDIO, read_cb);
766 void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
767 Read(DemuxerStream::VIDEO, read_cb);
770 void GenerateExpectedReads(int timecode, int block_count) {
771 GenerateExpectedReads(timecode, timecode, block_count);
774 void GenerateExpectedReads(int start_audio_timecode,
775 int start_video_timecode,
776 int block_count) {
777 CHECK_GT(block_count, 0);
779 if (block_count == 1) {
780 ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
781 return;
784 int audio_timecode = start_audio_timecode;
785 int video_timecode = start_video_timecode;
787 for (int i = 0; i < block_count; i++) {
788 if (audio_timecode <= video_timecode) {
789 ExpectRead(DemuxerStream::AUDIO, audio_timecode);
790 audio_timecode += kAudioBlockDuration;
791 continue;
794 ExpectRead(DemuxerStream::VIDEO, video_timecode);
795 video_timecode += kVideoBlockDuration;
799 void GenerateSingleStreamExpectedReads(int timecode,
800 int block_count,
801 DemuxerStream::Type type,
802 int block_duration) {
803 CHECK_GT(block_count, 0);
804 int stream_timecode = timecode;
806 for (int i = 0; i < block_count; i++) {
807 ExpectRead(type, stream_timecode);
808 stream_timecode += block_duration;
812 void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
813 GenerateSingleStreamExpectedReads(
814 timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
817 void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
818 GenerateSingleStreamExpectedReads(
819 timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
822 scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
823 ClusterBuilder cb;
824 cb.SetClusterTimecode(timecode);
825 return cb.Finish();
828 void CheckExpectedRanges(const std::string& expected) {
829 CheckExpectedRanges(kSourceId, expected);
832 void CheckExpectedRanges(const std::string& id,
833 const std::string& expected) {
834 Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
836 std::stringstream ss;
837 ss << "{ ";
838 for (size_t i = 0; i < r.size(); ++i) {
839 ss << "[" << r.start(i).InMilliseconds() << ","
840 << r.end(i).InMilliseconds() << ") ";
842 ss << "}";
843 EXPECT_EQ(expected, ss.str());
846 MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
847 const scoped_refptr<DecoderBuffer>&));
849 void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
850 scoped_refptr<DecoderBuffer>* buffer_out,
851 DemuxerStream::Status status,
852 const scoped_refptr<DecoderBuffer>& buffer) {
853 *status_out = status;
854 *buffer_out = buffer;
857 void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
858 DemuxerStream::Status* status,
859 base::TimeDelta* last_timestamp) {
860 DemuxerStream* stream = demuxer_->GetStream(type);
861 scoped_refptr<DecoderBuffer> buffer;
863 *last_timestamp = kNoTimestamp();
864 do {
865 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
866 base::Unretained(this), status, &buffer));
867 base::MessageLoop::current()->RunUntilIdle();
868 if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
869 *last_timestamp = buffer->timestamp();
870 } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
873 void ExpectEndOfStream(DemuxerStream::Type type) {
874 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
875 demuxer_->GetStream(type)->Read(base::Bind(
876 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
877 message_loop_.RunUntilIdle();
880 void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
881 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
882 HasTimestamp(timestamp_in_ms)));
883 demuxer_->GetStream(type)->Read(base::Bind(
884 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
885 message_loop_.RunUntilIdle();
888 void ExpectConfigChanged(DemuxerStream::Type type) {
889 EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
890 demuxer_->GetStream(type)->Read(base::Bind(
891 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
892 message_loop_.RunUntilIdle();
895 void CheckExpectedBuffers(DemuxerStream* stream,
896 const std::string& expected) {
897 std::vector<std::string> timestamps;
898 base::SplitString(expected, ' ', &timestamps);
899 std::stringstream ss;
900 for (size_t i = 0; i < timestamps.size(); ++i) {
901 // Initialize status to kAborted since it's possible for Read() to return
902 // without calling StoreStatusAndBuffer() if it doesn't have any buffers
903 // left to return.
904 DemuxerStream::Status status = DemuxerStream::kAborted;
905 scoped_refptr<DecoderBuffer> buffer;
906 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
907 base::Unretained(this), &status, &buffer));
908 base::MessageLoop::current()->RunUntilIdle();
909 if (status != DemuxerStream::kOk || buffer->end_of_stream())
910 break;
912 if (i > 0)
913 ss << " ";
914 ss << buffer->timestamp().InMilliseconds();
916 // Handle preroll buffers.
917 if (EndsWith(timestamps[i], "P", true)) {
918 ASSERT_EQ(kInfiniteDuration(), buffer->discard_padding().first);
919 ASSERT_EQ(base::TimeDelta(), buffer->discard_padding().second);
920 ss << "P";
923 EXPECT_EQ(expected, ss.str());
926 MOCK_METHOD1(Checkpoint, void(int id));
928 struct BufferTimestamps {
929 int video_time_ms;
930 int audio_time_ms;
932 static const int kSkip = -1;
934 // Test parsing a WebM file.
935 // |filename| - The name of the file in media/test/data to parse.
936 // |timestamps| - The expected timestamps on the parsed buffers.
937 // a timestamp of kSkip indicates that a Read() call for that stream
938 // shouldn't be made on that iteration of the loop. If both streams have
939 // a kSkip then the loop will terminate.
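// For example (hypothetical table, shown only to illustrate the sentinel):
//   static const BufferTimestamps kExpected[] = {
//     {0, 0}, {33, 23}, {kSkip, kSkip},
//   };
// The trailing {kSkip, kSkip} entry terminates the verification loop below.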
940 bool ParseWebMFile(const std::string& filename,
941 const BufferTimestamps* timestamps,
942 const base::TimeDelta& duration) {
943 return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
946 bool ParseWebMFile(const std::string& filename,
947 const BufferTimestamps* timestamps,
948 const base::TimeDelta& duration,
949 int stream_flags) {
950 EXPECT_CALL(*this, DemuxerOpened());
951 demuxer_->Initialize(
952 &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
954 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
955 return false;
957 // Read a WebM file into memory and send the data to the demuxer.
958 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
959 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
961 // Verify that the timestamps on the first few packets match what we
962 // expect.
963 for (size_t i = 0;
964 (timestamps[i].audio_time_ms != kSkip ||
965 timestamps[i].video_time_ms != kSkip);
966 i++) {
967 bool audio_read_done = false;
968 bool video_read_done = false;
970 if (timestamps[i].audio_time_ms != kSkip) {
971 ReadAudio(base::Bind(&OnReadDone,
972 base::TimeDelta::FromMilliseconds(
973 timestamps[i].audio_time_ms),
974 &audio_read_done));
975 EXPECT_TRUE(audio_read_done);
978 if (timestamps[i].video_time_ms != kSkip) {
979 ReadVideo(base::Bind(&OnReadDone,
980 base::TimeDelta::FromMilliseconds(
981 timestamps[i].video_time_ms),
982 &video_read_done));
983 EXPECT_TRUE(video_read_done);
987 return true;
990 MOCK_METHOD0(DemuxerOpened, void());
991 // TODO(xhwang): This is a workaround of the issue that move-only parameters
992 // are not supported in mocked methods. Remove this when the issue is fixed
993 // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
994 // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
995 MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
996 const uint8* init_data, int init_data_size));
997 void DemuxerNeedKey(const std::string& type,
998 const std::vector<uint8>& init_data) {
999 const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
1000 NeedKeyMock(type, init_data_ptr, init_data.size());
1003 void Seek(base::TimeDelta seek_time) {
1004 demuxer_->StartWaitingForSeek(seek_time);
1005 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
1006 message_loop_.RunUntilIdle();
1009 void MarkEndOfStream(PipelineStatus status) {
1010 demuxer_->MarkEndOfStream(status);
1011 message_loop_.RunUntilIdle();
1014 bool SetTimestampOffset(const std::string& id,
1015 base::TimeDelta timestamp_offset) {
1016 if (demuxer_->IsParsingMediaSegment(id))
1017 return false;
1019 timestamp_offset_map_[id] = timestamp_offset;
1020 return true;
1023 base::MessageLoop message_loop_;
1024 MockDemuxerHost host_;
1026 scoped_ptr<ChunkDemuxer> demuxer_;
1027 bool use_legacy_frame_processor_;
1029 base::TimeDelta append_window_start_for_next_append_;
1030 base::TimeDelta append_window_end_for_next_append_;
1032 // Map of source id to timestamp offset to use for the next AppendData()
1033 // operation for that source id.
1034 std::map<std::string, base::TimeDelta> timestamp_offset_map_;
1036 private:
1037 DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
1040 TEST_P(ChunkDemuxerTest, Init) {
1041 // Test no streams, audio-only, video-only, and audio & video scenarios.
1042 // Audio and video streams can be encrypted or not encrypted.
1043 for (int i = 0; i < 16; i++) {
1044 bool has_audio = (i & 0x1) != 0;
1045 bool has_video = (i & 0x2) != 0;
1046 bool is_audio_encrypted = (i & 0x4) != 0;
1047 bool is_video_encrypted = (i & 0x8) != 0;
1049 // No tests for invalid combinations.
1050 if ((!has_audio && is_audio_encrypted) ||
1051 (!has_video && is_video_encrypted)) {
1052 continue;
1055 CreateNewDemuxer();
1057 if (is_audio_encrypted || is_video_encrypted) {
1058 int need_key_count = (is_audio_encrypted ? 1 : 0) +
1059 (is_video_encrypted ? 1 : 0);
1060 EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
1061 DecryptConfig::kDecryptionKeySize))
1062 .Times(Exactly(need_key_count));
1065 int stream_flags = 0;
1066 if (has_audio)
1067 stream_flags |= HAS_AUDIO;
1069 if (has_video)
1070 stream_flags |= HAS_VIDEO;
1072 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1073 stream_flags, is_audio_encrypted, is_video_encrypted));
1075 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1076 if (has_audio) {
1077 ASSERT_TRUE(audio_stream);
1079 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1080 EXPECT_EQ(kCodecVorbis, config.codec());
1081 EXPECT_EQ(32, config.bits_per_channel());
1082 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1083 EXPECT_EQ(44100, config.samples_per_second());
1084 EXPECT_TRUE(config.extra_data());
1085 EXPECT_GT(config.extra_data_size(), 0u);
1086 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1087 EXPECT_EQ(is_audio_encrypted,
1088 audio_stream->audio_decoder_config().is_encrypted());
1089 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1090 ->supports_partial_append_window_trimming());
1091 } else {
1092 EXPECT_FALSE(audio_stream);
1095 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1096 if (has_video) {
1097 EXPECT_TRUE(video_stream);
1098 EXPECT_EQ(is_video_encrypted,
1099 video_stream->video_decoder_config().is_encrypted());
1100 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1101 ->supports_partial_append_window_trimming());
1102 } else {
1103 EXPECT_FALSE(video_stream);
1106 ShutdownDemuxer();
1107 demuxer_.reset();
1111 // TODO(acolwell): Fold this test into Init tests since the tests are
1112 // almost identical.
1113 TEST_P(ChunkDemuxerTest, InitText) {
1114 // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
1115 // No encryption cases handled here.
1116 bool has_video = true;
1117 bool is_audio_encrypted = false;
1118 bool is_video_encrypted = false;
1119 for (int i = 0; i < 2; i++) {
1120 bool has_audio = (i & 0x1) != 0;
1122 CreateNewDemuxer();
1124 DemuxerStream* text_stream = NULL;
1125 TextTrackConfig text_config;
1126 EXPECT_CALL(host_, AddTextStream(_, _))
1127 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1128 SaveArg<1>(&text_config)));
1130 int stream_flags = HAS_TEXT;
1131 if (has_audio)
1132 stream_flags |= HAS_AUDIO;
1134 if (has_video)
1135 stream_flags |= HAS_VIDEO;
1137 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1138 stream_flags, is_audio_encrypted, is_video_encrypted));
1139 ASSERT_TRUE(text_stream);
1140 EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1141 EXPECT_EQ(kTextSubtitles, text_config.kind());
1142 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1143 ->supports_partial_append_window_trimming());
1145 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1146 if (has_audio) {
1147 ASSERT_TRUE(audio_stream);
1149 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1150 EXPECT_EQ(kCodecVorbis, config.codec());
1151 EXPECT_EQ(32, config.bits_per_channel());
1152 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1153 EXPECT_EQ(44100, config.samples_per_second());
1154 EXPECT_TRUE(config.extra_data());
1155 EXPECT_GT(config.extra_data_size(), 0u);
1156 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1157 EXPECT_EQ(is_audio_encrypted,
1158 audio_stream->audio_decoder_config().is_encrypted());
1159 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1160 ->supports_partial_append_window_trimming());
1161 } else {
1162 EXPECT_FALSE(audio_stream);
1165 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1166 if (has_video) {
1167 EXPECT_TRUE(video_stream);
1168 EXPECT_EQ(is_video_encrypted,
1169 video_stream->video_decoder_config().is_encrypted());
1170 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1171 ->supports_partial_append_window_trimming());
1172 } else {
1173 EXPECT_FALSE(video_stream);
1176 ShutdownDemuxer();
1177 demuxer_.reset();
1181 TEST_P(ChunkDemuxerTest, SingleTextTrackIdChange) {
1182 // Test with 1 video stream, 1 audio, and 1 text stream. Send a second init
1183 // segment in which the text track ID changes. Verify appended buffers before
1184 // and after the second init segment map to the same underlying track buffers.
1185 CreateNewDemuxer();
1186 DemuxerStream* text_stream = NULL;
1187 TextTrackConfig text_config;
1188 EXPECT_CALL(host_, AddTextStream(_, _))
1189 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1190 SaveArg<1>(&text_config)));
1191 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1192 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1193 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1194 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1195 ASSERT_TRUE(audio_stream);
1196 ASSERT_TRUE(video_stream);
1197 ASSERT_TRUE(text_stream);
1199 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
1200 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 30");
1201 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "10K");
1202 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1204 scoped_ptr<uint8[]> info_tracks;
1205 int info_tracks_size = 0;
1206 CreateInitSegmentWithAlternateTextTrackNum(HAS_TEXT | HAS_AUDIO | HAS_VIDEO,
1207 false, false,
1208 &info_tracks, &info_tracks_size);
1209 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1210 append_window_start_for_next_append_,
1211 append_window_end_for_next_append_,
1212 &timestamp_offset_map_[kSourceId]);
1214 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "46K 69K");
1215 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "60K");
1216 AppendSingleStreamCluster(kSourceId, kAlternateTextTrackNum, "45K");
1218 CheckExpectedRanges(kSourceId, "{ [0,92) }");
1219 CheckExpectedBuffers(audio_stream, "0 23 46 69");
1220 CheckExpectedBuffers(video_stream, "0 30 60");
1221 CheckExpectedBuffers(text_stream, "10 45");
1223 ShutdownDemuxer();
1226 TEST_P(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
1227 // Tests that non-keyframes following an init segment are allowed
1228 // and dropped, as expected if the initialization segment received
1229 // algorithm correctly sets the needs random access point flag to true for all
1230 // track buffers. Note that the first initialization segment is insufficient
1231 // to fully test this since needs random access point flag initializes to
1232 // true.
1233 CreateNewDemuxer();
1234 DemuxerStream* text_stream = NULL;
1235 EXPECT_CALL(host_, AddTextStream(_, _))
1236 .WillOnce(SaveArg<0>(&text_stream));
1237 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1238 HAS_TEXT | HAS_AUDIO | HAS_VIDEO, false, false));
1239 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1240 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1241 ASSERT_TRUE(audio_stream && video_stream && text_stream);
1243 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0 23K");
1244 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0 30K");
1245 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0 40K");
1246 CheckExpectedRanges(kSourceId, "{ [30,46) }");
1248 AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
1249 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "46 69K");
1250 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "60 90K");
1251 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "80 90K");
1252 CheckExpectedRanges(kSourceId, "{ [30,92) }");
1254 CheckExpectedBuffers(audio_stream, "23 69");
1255 CheckExpectedBuffers(video_stream, "30 90");
1257 // WebM parser marks all text buffers as keyframes.
1258 CheckExpectedBuffers(text_stream, "0 40 80 90");
1261 // Make sure that the demuxer reports an error if Shutdown()
1262 // is called before all the initialization segments are appended.
1263 TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1264 EXPECT_CALL(*this, DemuxerOpened());
1265 demuxer_->Initialize(
1266 &host_, CreateInitDoneCB(
1267 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1269 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1270 EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1272 AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1274 ShutdownDemuxer();
1277 TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1278 EXPECT_CALL(*this, DemuxerOpened());
1279 demuxer_->Initialize(
1280 &host_, CreateInitDoneCB(
1281 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1283 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1284 EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1286 EXPECT_CALL(host_, AddTextStream(_, _))
1287 .Times(Exactly(1));
1289 AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1291 ShutdownDemuxer();
1294 // Verifies that all streams waiting for data receive an end of stream
1295 // buffer when Shutdown() is called.
1296 TEST_P(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1297 DemuxerStream* text_stream = NULL;
1298 EXPECT_CALL(host_, AddTextStream(_, _))
1299 .WillOnce(SaveArg<0>(&text_stream));
1300 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1302 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1303 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1305 bool audio_read_done = false;
1306 bool video_read_done = false;
1307 bool text_read_done = false;
1308 audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1309 video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1310 text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1311 message_loop_.RunUntilIdle();
1313 EXPECT_FALSE(audio_read_done);
1314 EXPECT_FALSE(video_read_done);
1315 EXPECT_FALSE(text_read_done);
1317 ShutdownDemuxer();
1319 EXPECT_TRUE(audio_read_done);
1320 EXPECT_TRUE(video_read_done);
1321 EXPECT_TRUE(text_read_done);
1324 // Test that Seek() completes successfully when the first cluster
1325 // arrives.
1326 TEST_P(ChunkDemuxerTest, AppendDataAfterSeek) {
1327 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1328 AppendCluster(kDefaultFirstCluster());
1330 InSequence s;
1332 EXPECT_CALL(*this, Checkpoint(1));
1334 Seek(base::TimeDelta::FromMilliseconds(46));
1336 EXPECT_CALL(*this, Checkpoint(2));
1338 Checkpoint(1);
1340 AppendCluster(kDefaultSecondCluster());
1342 message_loop_.RunUntilIdle();
1344 Checkpoint(2);
1347 // Test that parsing errors are handled for clusters appended after init.
1348 TEST_P(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1349 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1350 AppendCluster(kDefaultFirstCluster());
1352 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1353 AppendGarbage();
1356 // Test the case where a Seek() is requested while the parser
1357 // is in the middle of a cluster. This is to verify that the parser
1358 // does not reset itself on a seek.
1359 TEST_P(ChunkDemuxerTest, SeekWhileParsingCluster) {
1360 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1362 InSequence s;
1364 scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1366 // Split the cluster into two appends at an arbitrary point near the end.
1367 int first_append_size = cluster_a->size() - 11;
1368 int second_append_size = cluster_a->size() - first_append_size;
1370 // Append the first part of the cluster.
1371 AppendData(cluster_a->data(), first_append_size);
1373 ExpectRead(DemuxerStream::AUDIO, 0);
1374 ExpectRead(DemuxerStream::VIDEO, 0);
1375 ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1377 Seek(base::TimeDelta::FromSeconds(5));
1379 // Append the rest of the cluster.
1380 AppendData(cluster_a->data() + first_append_size, second_append_size);
1382 // Append the new cluster and verify that only the blocks
1383 // in the new cluster are returned.
1384 AppendCluster(GenerateCluster(5000, 6));
1385 GenerateExpectedReads(5000, 6);
1388 // Test the case where AppendData() is called before Init().
1389 TEST_P(ChunkDemuxerTest, AppendDataBeforeInit) {
1390 scoped_ptr<uint8[]> info_tracks;
1391 int info_tracks_size = 0;
1392 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1393 false, false, &info_tracks, &info_tracks_size);
1394 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1395 append_window_start_for_next_append_,
1396 append_window_end_for_next_append_,
1397 &timestamp_offset_map_[kSourceId]);
1400 // Make sure Read() callbacks are dispatched with the proper data.
1401 TEST_P(ChunkDemuxerTest, Read) {
1402 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1404 AppendCluster(kDefaultFirstCluster());
1406 bool audio_read_done = false;
1407 bool video_read_done = false;
1408 ReadAudio(base::Bind(&OnReadDone,
1409 base::TimeDelta::FromMilliseconds(0),
1410 &audio_read_done));
1411 ReadVideo(base::Bind(&OnReadDone,
1412 base::TimeDelta::FromMilliseconds(0),
1413 &video_read_done));
1415 EXPECT_TRUE(audio_read_done);
1416 EXPECT_TRUE(video_read_done);
1419 TEST_P(ChunkDemuxerTest, OutOfOrderClusters) {
1420 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1421 AppendCluster(kDefaultFirstCluster());
1422 AppendCluster(GenerateCluster(10, 4));
1424 // Make sure that AppendCluster() does not fail with a cluster that
1425 // overlaps with the previously appended cluster.
1426 AppendCluster(GenerateCluster(5, 4));
1428 // Verify that AppendData() can still accept more data.
1429 scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1430 demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1431 append_window_start_for_next_append_,
1432 append_window_end_for_next_append_,
1433 &timestamp_offset_map_[kSourceId]);
1436 TEST_P(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1437 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1438 AppendCluster(kDefaultFirstCluster());
1440 ClusterBuilder cb;
1442 // Test the case where block timecodes are not monotonically
1443 // increasing but stay above the cluster timecode.
1444 cb.SetClusterTimecode(5);
1445 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1446 AddSimpleBlock(&cb, kVideoTrackNum, 10);
1447 AddSimpleBlock(&cb, kAudioTrackNum, 7);
1448 AddSimpleBlock(&cb, kVideoTrackNum, 15);
1450 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1451 AppendCluster(cb.Finish());
1453 // Verify that AppendData() ignores data after the error.
1454 scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1455 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1456 append_window_start_for_next_append_,
1457 append_window_end_for_next_append_,
1458 &timestamp_offset_map_[kSourceId]);
1461 TEST_P(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1462 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1463 AppendCluster(kDefaultFirstCluster());
1465 ClusterBuilder cb;
1467 // Test timecodes going backwards and including values less than the cluster
1468 // timecode.
1469 cb.SetClusterTimecode(5);
1470 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1471 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1472 AddSimpleBlock(&cb, kAudioTrackNum, 3);
1473 AddSimpleBlock(&cb, kVideoTrackNum, 3);
1475 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1476 AppendCluster(cb.Finish());
1478 // Verify that AppendData() ignores data after the error.
1479 scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1480 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1481 append_window_start_for_next_append_,
1482 append_window_end_for_next_append_,
1483 &timestamp_offset_map_[kSourceId]);
1487 TEST_P(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1488 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1489 AppendCluster(kDefaultFirstCluster());
1491 ClusterBuilder cb;
1493 // Test monotonically increasing timestamps on a per-stream
1494 // basis.
1495 cb.SetClusterTimecode(5);
1496 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1497 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1498 AddSimpleBlock(&cb, kAudioTrackNum, 4);
1499 AddSimpleBlock(&cb, kVideoTrackNum, 7);
1501 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1502 AppendCluster(cb.Finish());
1505 // Test the case where a cluster is passed to AppendCluster() before
1506 // INFO & TRACKS data.
1507 TEST_P(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1508 EXPECT_CALL(*this, DemuxerOpened());
1509 demuxer_->Initialize(
1510 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1512 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1514 AppendCluster(GenerateCluster(0, 1));
1517 // Test cases where we get a MarkEndOfStream() call during initialization.
1518 TEST_P(ChunkDemuxerTest, EOSDuringInit) {
1519 EXPECT_CALL(*this, DemuxerOpened());
1520 demuxer_->Initialize(
1521 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1522 MarkEndOfStream(PIPELINE_OK);
1525 TEST_P(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1526 EXPECT_CALL(*this, DemuxerOpened());
1527 demuxer_->Initialize(
1528 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1530 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1532 CheckExpectedRanges("{ }");
1533 MarkEndOfStream(PIPELINE_OK);
1534 ShutdownDemuxer();
1535 CheckExpectedRanges("{ }");
1536 demuxer_->RemoveId(kSourceId);
1537 demuxer_.reset();
1540 TEST_P(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1541 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1543 CheckExpectedRanges("{ }");
1544 MarkEndOfStream(PIPELINE_OK);
1545 CheckExpectedRanges("{ }");
1548 TEST_P(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1549 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1551 AppendCluster(kDefaultFirstCluster());
1552 CheckExpectedRanges(kDefaultFirstClusterRange);
1554 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1555 MarkEndOfStream(PIPELINE_ERROR_DECODE);
1556 CheckExpectedRanges(kDefaultFirstClusterRange);
1559 TEST_P(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1560 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1562 AppendCluster(kDefaultFirstCluster());
1563 CheckExpectedRanges(kDefaultFirstClusterRange);
1565 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1566 MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1569 // Helper class to reduce duplicate code when testing end of stream
1570 // Read() behavior.
1571 class EndOfStreamHelper {
1572 public:
1573 explicit EndOfStreamHelper(Demuxer* demuxer)
1574 : demuxer_(demuxer),
1575 audio_read_done_(false),
1576 video_read_done_(false) {
1579 // Request a read on the audio and video streams.
1580 void RequestReads() {
1581 EXPECT_FALSE(audio_read_done_);
1582 EXPECT_FALSE(video_read_done_);
1584 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1585 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1587 audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1588 video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1589 base::MessageLoop::current()->RunUntilIdle();
1592 // Check to see if |audio_read_done_| and |video_read_done_| variables
1593 // match |expected|.
1594 void CheckIfReadDonesWereCalled(bool expected) {
1595 base::MessageLoop::current()->RunUntilIdle();
1596 EXPECT_EQ(expected, audio_read_done_);
1597 EXPECT_EQ(expected, video_read_done_);
1600 private:
1601 static void OnEndOfStreamReadDone(
1602 bool* called,
1603 DemuxerStream::Status status,
1604 const scoped_refptr<DecoderBuffer>& buffer) {
1605 EXPECT_EQ(status, DemuxerStream::kOk);
1606 EXPECT_TRUE(buffer->end_of_stream());
1607 *called = true;
1610 Demuxer* demuxer_;
1611 bool audio_read_done_;
1612 bool video_read_done_;
1614 DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1617 // Make sure that all pending reads that we don't have media data for get an
1618 // "end of stream" buffer when MarkEndOfStream() is called.
1619 TEST_P(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1620 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1622 AppendCluster(GenerateCluster(0, 2));
1624 bool audio_read_done_1 = false;
1625 bool video_read_done_1 = false;
1626 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1627 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1629 ReadAudio(base::Bind(&OnReadDone,
1630 base::TimeDelta::FromMilliseconds(0),
1631 &audio_read_done_1));
1632 ReadVideo(base::Bind(&OnReadDone,
1633 base::TimeDelta::FromMilliseconds(0),
1634 &video_read_done_1));
1635 message_loop_.RunUntilIdle();
1637 EXPECT_TRUE(audio_read_done_1);
1638 EXPECT_TRUE(video_read_done_1);
1640 end_of_stream_helper_1.RequestReads();
1642 EXPECT_CALL(host_, SetDuration(
1643 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1644 MarkEndOfStream(PIPELINE_OK);
1646 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1648 end_of_stream_helper_2.RequestReads();
1649 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1652 // Make sure that all Read() calls after we get a MarkEndOfStream()
1653 // call return an "end of stream" buffer.
1654 TEST_P(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1655 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1657 AppendCluster(GenerateCluster(0, 2));
1659 bool audio_read_done_1 = false;
1660 bool video_read_done_1 = false;
1661 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1662 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1663 EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1665 ReadAudio(base::Bind(&OnReadDone,
1666 base::TimeDelta::FromMilliseconds(0),
1667 &audio_read_done_1));
1668 ReadVideo(base::Bind(&OnReadDone,
1669 base::TimeDelta::FromMilliseconds(0),
1670 &video_read_done_1));
1672 end_of_stream_helper_1.RequestReads();
1674 EXPECT_TRUE(audio_read_done_1);
1675 EXPECT_TRUE(video_read_done_1);
1676 end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1678 EXPECT_CALL(host_, SetDuration(
1679 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1680 MarkEndOfStream(PIPELINE_OK);
1682 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1684 // Request a few more reads and make sure we immediately get
1685 // end of stream buffers.
1686 end_of_stream_helper_2.RequestReads();
1687 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1689 end_of_stream_helper_3.RequestReads();
1690 end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1693 TEST_P(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1694 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1696 AppendCluster(0, 10);
1697 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1698 MarkEndOfStream(PIPELINE_OK);
1700 // Start the first seek.
1701 Seek(base::TimeDelta::FromMilliseconds(20));
1703 // Simulate another seek being requested before the first
1704 // seek has finished prerolling.
1705 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1706 demuxer_->CancelPendingSeek(seek_time2);
1708 // Finish second seek.
1709 Seek(seek_time2);
1711 DemuxerStream::Status status;
1712 base::TimeDelta last_timestamp;
1714 // Make sure audio can reach end of stream.
1715 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1716 ASSERT_EQ(status, DemuxerStream::kOk);
1718 // Make sure video can reach end of stream.
1719 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1720 ASSERT_EQ(status, DemuxerStream::kOk);
1723 // Verify buffered range change behavior for audio/video/text tracks.
1724 TEST_P(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1725 DemuxerStream* text_stream = NULL;
1727 EXPECT_CALL(host_, AddTextStream(_, _))
1728 .WillOnce(SaveArg<0>(&text_stream));
1729 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1731 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
1732 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
1734 // Check expected ranges and verify that an empty text track does not
1735 // affect the expected ranges.
1736 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1738 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1739 MarkEndOfStream(PIPELINE_OK);
1741 // Check expected ranges and verify that an empty text track does not
1742 // affect the expected ranges.
1743 CheckExpectedRanges(kSourceId, "{ [0,66) }");
1745 // Unmark end of stream state and verify that the ranges return to
1746 // their pre-"end of stream" values.
1747 demuxer_->UnmarkEndOfStream();
1748 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1750 // Add text track data and verify that the buffered ranges don't change
1751 // since the intersection of all the tracks doesn't change.
1752 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1753 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
1754 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1756 // Mark end of stream and verify that text track data is reflected in
1757 // the new range.
1758 MarkEndOfStream(PIPELINE_OK);
1759 CheckExpectedRanges(kSourceId, "{ [0,200) }");
1762 // Make sure AppendData() will accept elements that span multiple calls.
1763 TEST_P(ChunkDemuxerTest, AppendingInPieces) {
1764 EXPECT_CALL(*this, DemuxerOpened());
1765 demuxer_->Initialize(
1766 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1768 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1770 scoped_ptr<uint8[]> info_tracks;
1771 int info_tracks_size = 0;
1772 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1773 false, false, &info_tracks, &info_tracks_size);
1775 scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1776 scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1778 size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1779 scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1780 uint8* dst = buffer.get();
1781 memcpy(dst, info_tracks.get(), info_tracks_size);
1782 dst += info_tracks_size;
1784 memcpy(dst, cluster_a->data(), cluster_a->size());
1785 dst += cluster_a->size();
1787 memcpy(dst, cluster_b->data(), cluster_b->size());
1788 dst += cluster_b->size();
1790 AppendDataInPieces(buffer.get(), buffer_size);
1792 GenerateExpectedReads(0, 9);
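// For reference, a minimal sketch of what piece-wise appending amounts to,
// assuming the fixture's AppendData(const uint8*, size_t) overload targets
// kSourceId; the piece size is arbitrary and chosen only for illustration:
//
//   const size_t kPieceSize = 7;
//   for (size_t offset = 0; offset < buffer_size; offset += kPieceSize) {
//     size_t length = std::min(kPieceSize, buffer_size - offset);
//     AppendData(buffer.get() + offset, length);
//   }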
1795 TEST_P(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1796 struct BufferTimestamps buffer_timestamps[] = {
1797 {0, 0},
1798 {33, 3},
1799 {67, 6},
1800 {100, 9},
1801 {133, 12},
1802 {kSkip, kSkip},
1805 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1806 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1807 // have the correct duration in the init segment. See http://crbug.com/354284.
1808 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1810 ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1811 base::TimeDelta::FromMilliseconds(2744)));
1814 TEST_P(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1815 struct BufferTimestamps buffer_timestamps[] = {
1816 {0, 0},
1817 {33, 3},
1818 {67, 6},
1819 {100, 9},
1820 {133, 12},
1821 {kSkip, kSkip},
1824 ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
1825 kInfiniteDuration()));
1828 TEST_P(ChunkDemuxerTest, WebMFile_AudioOnly) {
1829 struct BufferTimestamps buffer_timestamps[] = {
1830 {kSkip, 0},
1831 {kSkip, 3},
1832 {kSkip, 6},
1833 {kSkip, 9},
1834 {kSkip, 12},
1835 {kSkip, kSkip},
1838 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1839 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1840 // have the correct duration in the init segment. See http://crbug.com/354284.
1841 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1843 ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
1844 base::TimeDelta::FromMilliseconds(2744),
1845 HAS_AUDIO));
1848 TEST_P(ChunkDemuxerTest, WebMFile_VideoOnly) {
1849 struct BufferTimestamps buffer_timestamps[] = {
1850 {0, kSkip},
1851 {33, kSkip},
1852 {67, kSkip},
1853 {100, kSkip},
1854 {133, kSkip},
1855 {kSkip, kSkip},
1858 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1859 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1860 // have the correct duration in the init segment. See http://crbug.com/354284.
1861 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
1863 ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
1864 base::TimeDelta::FromMilliseconds(2703),
1865 HAS_VIDEO));
1868 TEST_P(ChunkDemuxerTest, WebMFile_AltRefFrames) {
1869 struct BufferTimestamps buffer_timestamps[] = {
1870 {0, 0},
1871 {33, 3},
1872 {33, 6},
1873 {67, 9},
1874 {100, 12},
1875 {kSkip, kSkip},
1878 ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
1879 base::TimeDelta::FromMilliseconds(2767)));
1882 // Verify that we output buffers before the entire cluster has been parsed.
1883 TEST_P(ChunkDemuxerTest, IncrementalClusterParsing) {
1884 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1885 AppendEmptyCluster(0);
1887 scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
1889 bool audio_read_done = false;
1890 bool video_read_done = false;
1891 ReadAudio(base::Bind(&OnReadDone,
1892 base::TimeDelta::FromMilliseconds(0),
1893 &audio_read_done));
1894 ReadVideo(base::Bind(&OnReadDone,
1895 base::TimeDelta::FromMilliseconds(0),
1896 &video_read_done));
1898 // Make sure the reads haven't completed yet.
1899 EXPECT_FALSE(audio_read_done);
1900 EXPECT_FALSE(video_read_done);
1902 // Append data one byte at a time until one or both reads complete.
1903 int i = 0;
1904 for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
1905 AppendData(cluster->data() + i, 1);
1906 message_loop_.RunUntilIdle();
1909 EXPECT_TRUE(audio_read_done || video_read_done);
1910 EXPECT_GT(i, 0);
1911 EXPECT_LT(i, cluster->size());
1913 audio_read_done = false;
1914 video_read_done = false;
1915 ReadAudio(base::Bind(&OnReadDone,
1916 base::TimeDelta::FromMilliseconds(23),
1917 &audio_read_done));
1918 ReadVideo(base::Bind(&OnReadDone,
1919 base::TimeDelta::FromMilliseconds(33),
1920 &video_read_done));
1922 // Make sure the reads haven't completed yet.
1923 EXPECT_FALSE(audio_read_done);
1924 EXPECT_FALSE(video_read_done);
1926 // Append the remaining data.
1927 ASSERT_LT(i, cluster->size());
1928 AppendData(cluster->data() + i, cluster->size() - i);
1930 message_loop_.RunUntilIdle();
1932 EXPECT_TRUE(audio_read_done);
1933 EXPECT_TRUE(video_read_done);
1936 TEST_P(ChunkDemuxerTest, ParseErrorDuringInit) {
1937 EXPECT_CALL(*this, DemuxerOpened());
1938 demuxer_->Initialize(
1939 &host_, CreateInitDoneCB(
1940 kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1942 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1944 uint8 tmp = 0;
1945 demuxer_->AppendData(kSourceId, &tmp, 1,
1946 append_window_start_for_next_append_,
1947 append_window_end_for_next_append_,
1948 &timestamp_offset_map_[kSourceId]);
1951 TEST_P(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
1952 EXPECT_CALL(*this, DemuxerOpened());
1953 demuxer_->Initialize(
1954 &host_, CreateInitDoneCB(kNoTimestamp(),
1955 DEMUXER_ERROR_COULD_NOT_OPEN), true);
1957 std::vector<std::string> codecs(1);
1958 codecs[0] = "vorbis";
1959 ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs,
1960 use_legacy_frame_processor_),
1961 ChunkDemuxer::kOk);
1963 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1966 TEST_P(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
1967 EXPECT_CALL(*this, DemuxerOpened());
1968 demuxer_->Initialize(
1969 &host_, CreateInitDoneCB(kNoTimestamp(),
1970 DEMUXER_ERROR_COULD_NOT_OPEN), true);
1972 std::vector<std::string> codecs(1);
1973 codecs[0] = "vp8";
1974 ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs,
1975 use_legacy_frame_processor_),
1976 ChunkDemuxer::kOk);
1978 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1981 TEST_P(ChunkDemuxerTest, MultipleHeaders) {
1982 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1984 AppendCluster(kDefaultFirstCluster());
1986 // Append another identical initialization segment.
1987 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1989 AppendCluster(kDefaultSecondCluster());
1991 GenerateExpectedReads(0, 9);
1994 TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
1995 std::string audio_id = "audio1";
1996 std::string video_id = "video1";
1997 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1999 // Append audio and video data into separate source ids.
2000 AppendCluster(audio_id,
2001 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2002 GenerateAudioStreamExpectedReads(0, 4);
2003 AppendCluster(video_id,
2004 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2005 GenerateVideoStreamExpectedReads(0, 4);
2008 TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
2009 // TODO(matthewjheaney): Here and elsewhere, we need more tests
2010 // for inband text tracks (http://crbug/321455).
2012 std::string audio_id = "audio1";
2013 std::string video_id = "video1";
2015 EXPECT_CALL(host_, AddTextStream(_, _))
2016 .Times(Exactly(2));
2017 ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
2019 // Append audio and video data into separate source ids.
2020 AppendCluster(audio_id,
2021 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2022 GenerateAudioStreamExpectedReads(0, 4);
2023 AppendCluster(video_id,
2024 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2025 GenerateVideoStreamExpectedReads(0, 4);
2028 TEST_P(ChunkDemuxerTest, AddIdFailures) {
2029 EXPECT_CALL(*this, DemuxerOpened());
2030 demuxer_->Initialize(
2031 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2033 std::string audio_id = "audio1";
2034 std::string video_id = "video1";
2036 ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
2038 // Adding an id with audio/video should fail because we already added audio.
2039 ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
2041 AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
2043 // Adding an id after append should fail.
2044 ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
2047 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
2048 TEST_P(ChunkDemuxerTest, RemoveId) {
2049 std::string audio_id = "audio1";
2050 std::string video_id = "video1";
2051 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2053 // Append audio and video data into separate source ids.
2054 AppendCluster(audio_id,
2055 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2056 AppendCluster(video_id,
2057 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2059 // Read() from audio should return normal buffers.
2060 GenerateAudioStreamExpectedReads(0, 4);
2062 // Remove the audio id.
2063 demuxer_->RemoveId(audio_id);
2065 // Read() from audio should return "end of stream" buffers.
2066 bool audio_read_done = false;
2067 ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2068 message_loop_.RunUntilIdle();
2069 EXPECT_TRUE(audio_read_done);
2071 // Read() from video should still return normal buffers.
2072 GenerateVideoStreamExpectedReads(0, 4);
2075 // Test that removing an ID immediately after adding it does not interfere with
2076 // quota for new IDs in the future.
2077 TEST_P(ChunkDemuxerTest, RemoveAndAddId) {
2078 std::string audio_id_1 = "audio1";
2079 ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
2080 demuxer_->RemoveId(audio_id_1);
2082 std::string audio_id_2 = "audio2";
2083 ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
2086 TEST_P(ChunkDemuxerTest, SeekCanceled) {
2087 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2089 // Append cluster at the beginning of the stream.
2090 AppendCluster(GenerateCluster(0, 4));
2092 // Seek to an unbuffered region.
2093 Seek(base::TimeDelta::FromSeconds(50));
2095 // Attempt to read in unbuffered area; should not fulfill the read.
2096 bool audio_read_done = false;
2097 bool video_read_done = false;
2098 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2099 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2100 EXPECT_FALSE(audio_read_done);
2101 EXPECT_FALSE(video_read_done);
2103 // Now cancel the pending seek, which should flush the reads with empty
2104 // buffers.
2105 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2106 demuxer_->CancelPendingSeek(seek_time);
2107 message_loop_.RunUntilIdle();
2108 EXPECT_TRUE(audio_read_done);
2109 EXPECT_TRUE(video_read_done);
2111 // A seek back to the buffered region should succeed.
2112 Seek(seek_time);
2113 GenerateExpectedReads(0, 4);
2116 TEST_P(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
2117 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2119 // Append cluster at the beginning of the stream.
2120 AppendCluster(GenerateCluster(0, 4));
2122 // Start waiting for a seek.
2123 base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
2124 base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
2125 demuxer_->StartWaitingForSeek(seek_time1);
2127 // Now cancel the upcoming seek to an unbuffered region.
2128 demuxer_->CancelPendingSeek(seek_time2);
2129 demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2131 // Read requests should be fulfilled with empty buffers.
2132 bool audio_read_done = false;
2133 bool video_read_done = false;
2134 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2135 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2136 EXPECT_TRUE(audio_read_done);
2137 EXPECT_TRUE(video_read_done);
2139 // A seek back to the buffered region should succeed.
2140 Seek(seek_time2);
2141 GenerateExpectedReads(0, 4);
2144 // Test that Seek() successfully seeks to all source IDs.
2145 TEST_P(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2146 std::string audio_id = "audio1";
2147 std::string video_id = "video1";
2148 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2150 AppendCluster(
2151 audio_id,
2152 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2153 AppendCluster(
2154 video_id,
2155 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2157 // Read() should return buffers at 0.
2158 bool audio_read_done = false;
2159 bool video_read_done = false;
2160 ReadAudio(base::Bind(&OnReadDone,
2161 base::TimeDelta::FromMilliseconds(0),
2162 &audio_read_done));
2163 ReadVideo(base::Bind(&OnReadDone,
2164 base::TimeDelta::FromMilliseconds(0),
2165 &video_read_done));
2166 EXPECT_TRUE(audio_read_done);
2167 EXPECT_TRUE(video_read_done);
2169 // Seek to 3 (an unbuffered region).
2170 Seek(base::TimeDelta::FromSeconds(3));
2172 audio_read_done = false;
2173 video_read_done = false;
2174 ReadAudio(base::Bind(&OnReadDone,
2175 base::TimeDelta::FromSeconds(3),
2176 &audio_read_done));
2177 ReadVideo(base::Bind(&OnReadDone,
2178 base::TimeDelta::FromSeconds(3),
2179 &video_read_done));
2180 // Read()s should not return until after data is appended at the Seek point.
2181 EXPECT_FALSE(audio_read_done);
2182 EXPECT_FALSE(video_read_done);
2184 AppendCluster(audio_id,
2185 GenerateSingleStreamCluster(
2186 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2187 AppendCluster(video_id,
2188 GenerateSingleStreamCluster(
2189 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2191 message_loop_.RunUntilIdle();
2193 // Read() should return buffers at 3.
2194 EXPECT_TRUE(audio_read_done);
2195 EXPECT_TRUE(video_read_done);
2198 // Test that Seek() completes successfully when EndOfStream
2199 // is called before data is available for that seek point.
2200 // This scenario can occur when seeking past the end of stream
2201 // of either audio or video (or both).
2202 TEST_P(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2203 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2205 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2206 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2208 // Seeking past the end of video.
2209 // Note: audio data is available for that seek point.
2210 bool seek_cb_was_called = false;
2211 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2212 demuxer_->StartWaitingForSeek(seek_time);
2213 demuxer_->Seek(seek_time,
2214 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2215 message_loop_.RunUntilIdle();
2217 EXPECT_FALSE(seek_cb_was_called);
2219 EXPECT_CALL(host_, SetDuration(
2220 base::TimeDelta::FromMilliseconds(120)));
2221 MarkEndOfStream(PIPELINE_OK);
2222 message_loop_.RunUntilIdle();
2224 EXPECT_TRUE(seek_cb_was_called);
2226 ShutdownDemuxer();
2229 // Test that EndOfStream is ignored if coming during a pending seek
2230 // whose seek time is before some existing ranges.
2231 TEST_P(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2232 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2234 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2235 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2236 AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2237 AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2239 bool seek_cb_was_called = false;
2240 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2241 demuxer_->StartWaitingForSeek(seek_time);
2242 demuxer_->Seek(seek_time,
2243 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2244 message_loop_.RunUntilIdle();
2246 EXPECT_FALSE(seek_cb_was_called);
2248 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2249 MarkEndOfStream(PIPELINE_OK);
2250 message_loop_.RunUntilIdle();
2252 EXPECT_FALSE(seek_cb_was_called);
2254 demuxer_->UnmarkEndOfStream();
2256 AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2257 AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2259 message_loop_.RunUntilIdle();
2261 EXPECT_TRUE(seek_cb_was_called);
2263 ShutdownDemuxer();
2266 // Test ranges in an audio-only stream.
2267 TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2268 EXPECT_CALL(*this, DemuxerOpened());
2269 demuxer_->Initialize(
2270 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2272 ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2273 AppendInitSegment(HAS_AUDIO);
2275 // Test a simple cluster.
2276 AppendCluster(
2277 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2279 CheckExpectedRanges("{ [0,92) }");
2281 // Append a disjoint cluster to check for two separate ranges.
2282 AppendCluster(GenerateSingleStreamCluster(
2283 150, 219, kAudioTrackNum, kAudioBlockDuration));
2285 CheckExpectedRanges("{ [0,92) [150,219) }");
2288 // Test ranges in a video-only stream.
2289 TEST_P(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2290 EXPECT_CALL(*this, DemuxerOpened());
2291 demuxer_->Initialize(
2292 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2294 ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2295 AppendInitSegment(HAS_VIDEO);
2297 // Test a simple cluster.
2298 AppendCluster(
2299 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2301 CheckExpectedRanges("{ [0,132) }");
2303 // Append a disjoint cluster to check for two separate ranges.
2304 AppendCluster(GenerateSingleStreamCluster(
2305 200, 299, kVideoTrackNum, kVideoBlockDuration));
2307 CheckExpectedRanges("{ [0,132) [200,299) }");
2310 TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2311 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2313 // Audio: 0 -> 23
2314 // Video: 0 -> 33
2315 // Buffered Range: 0 -> 23
2316 // Audio block duration is smaller than video block duration,
2317 // so the buffered ranges should correspond to the audio blocks.
2318 AppendCluster(GenerateSingleStreamCluster(
2319 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2320 AppendCluster(GenerateSingleStreamCluster(
2321 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2323 CheckExpectedRanges("{ [0,23) }");
2325 // Audio: 300 -> 400
2326 // Video: 320 -> 420
2327 // Buffered Range: 320 -> 400 (end overlap)
2328 AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2329 AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2331 CheckExpectedRanges("{ [0,23) [320,400) }");
2333 // Audio: 520 -> 590
2334 // Video: 500 -> 570
2335 // Buffered Range: 520 -> 570 (front overlap)
2336 AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2337 AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2339 CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2341 // Audio: 720 -> 750
2342 // Video: 700 -> 770
2343 // Buffered Range: 720 -> 750 (complete overlap, audio)
2344 AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2345 AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2347 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2349 // Audio: 900 -> 970
2350 // Video: 920 -> 950
2351 // Buffered Range: 920 -> 950 (complete overlap, video)
2352 AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2353 AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2355 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2357 // Appending within buffered range should not affect buffered ranges.
2358 AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2359 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2361 // Appending to a single stream outside the buffered ranges should not affect
2362 // the buffered ranges.
2363 AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2364 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
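// The expectations above follow the intersection rule sketched here (an
// informal restatement, not demuxer code): a time is buffered for the source
// only if every media stream has it buffered, e.g.
//
//   audio [300,400) intersected with video [320,420)  ->  [320,400)  // end overlap
//   audio [520,590) intersected with video [500,570)  ->  [520,570)  // front overlap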
2367 TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2368 EXPECT_CALL(host_, AddTextStream(_, _));
2369 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2371 // Append audio & video data
2372 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
2373 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2375 // Verify that a text track with no cues does not result in an empty buffered
2376 // range.
2377 CheckExpectedRanges("{ [0,46) }");
2379 // Add some text cues.
2380 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
2382 // Verify that the new cues did not affect the buffered ranges.
2383 CheckExpectedRanges("{ [0,46) }");
2385 // Remove the buffered range.
2386 demuxer_->Remove(kSourceId, base::TimeDelta(),
2387 base::TimeDelta::FromMilliseconds(46));
2388 CheckExpectedRanges("{ }");
2391 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2392 // over-hanging tails at the end of the ranges, as these are likely due to block
2393 // duration differences.
2394 TEST_P(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2395 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2397 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
2398 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2400 CheckExpectedRanges("{ [0,46) }");
2402 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2403 MarkEndOfStream(PIPELINE_OK);
2405 // Verify that the range extends to the end of the video data.
2406 CheckExpectedRanges("{ [0,66) }");
2408 // Verify that the range reverts to the intersection when end of stream
2409 // has been cancelled.
2410 demuxer_->UnmarkEndOfStream();
2411 CheckExpectedRanges("{ [0,46) }");
2413 // Append and remove data so that the 2 streams' end ranges do not overlap.
2415 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
2416 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2417 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2418 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
2419 "200K 233 266 299 332K 365");
2421 // At this point, the per-stream ranges are as follows:
2422 // Audio: [0,46) [200,246)
2423 // Video: [0,66) [200,398)
2424 CheckExpectedRanges("{ [0,46) [200,246) }");
2426 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2427 base::TimeDelta::FromMilliseconds(300));
2429 // At this point, the per-stream ranges are as follows:
2430 // Audio: [0,46)
2431 // Video: [0,66) [332,398)
2432 CheckExpectedRanges("{ [0,46) }");
2434 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2435 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
2437 // At this point, the per-stream ranges are as follows:
2438 // Audio: [0,46) [200,246)
2439 // Video: [0,66) [200,266) [332,398)
2440 // NOTE: The last ranges on the two streams do not overlap in time.
2441 CheckExpectedRanges("{ [0,46) [200,246) }");
2443 MarkEndOfStream(PIPELINE_OK);
2445 // NOTE: The last range on each stream gets extended to the highest
2446 // end timestamp according to the spec. The last audio range gets extended
2447 // from [200,246) to [200,398) which is why the intersection results in the
2448 // middle range getting larger AND the new range appearing.
2449 CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
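// Worked example of the extension described in the NOTE above (informal
// restatement): with end of stream marked, the last audio range [200,246) is
// treated as reaching the highest buffered end timestamp (398), so
//
//   audio: [0,46) [200,398)    video: [0,66) [200,266) [332,398)
//   intersection -> { [0,46) [200,266) [332,398) }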
2452 TEST_P(ChunkDemuxerTest, DifferentStreamTimecodes) {
2453 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2455 // Create a cluster where the video timecode begins 25ms after the audio.
2456 AppendCluster(GenerateCluster(0, 25, 8));
2458 Seek(base::TimeDelta::FromSeconds(0));
2459 GenerateExpectedReads(0, 25, 8);
2461 // Seek to 5 seconds.
2462 Seek(base::TimeDelta::FromSeconds(5));
2464 // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2465 // after the video.
2466 AppendCluster(GenerateCluster(5025, 5000, 8));
2467 GenerateExpectedReads(5025, 5000, 8);
2470 TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2471 std::string audio_id = "audio1";
2472 std::string video_id = "video1";
2473 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2475 // Generate two streams where the video stream starts 5ms after the audio
2476 // stream and append them.
2477 AppendCluster(audio_id, GenerateSingleStreamCluster(
2478 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2479 AppendCluster(video_id, GenerateSingleStreamCluster(
2480 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2482 // Both streams should be able to fulfill a seek to 25.
2483 Seek(base::TimeDelta::FromMilliseconds(25));
2484 GenerateAudioStreamExpectedReads(25, 4);
2485 GenerateVideoStreamExpectedReads(30, 4);
2488 TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2489 std::string audio_id = "audio1";
2490 std::string video_id = "video1";
2491 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2493 // Generate two streams where the video stream starts 10s after the audio
2494 // stream and append them.
2495 AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2496 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2497 AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2498 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2500 // Should not be able to fulfill a seek to 0.
2501 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2502 demuxer_->StartWaitingForSeek(seek_time);
2503 demuxer_->Seek(seek_time,
2504 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2505 ExpectRead(DemuxerStream::AUDIO, 0);
2506 ExpectEndOfStream(DemuxerStream::VIDEO);
2509 TEST_P(ChunkDemuxerTest, ClusterWithNoBuffers) {
2510 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2512 // Generate and append an empty cluster beginning at 0.
2513 AppendEmptyCluster(0);
2515 // Sanity check that data can be appended after this cluster correctly.
2516 AppendCluster(GenerateCluster(0, 2));
2517 ExpectRead(DemuxerStream::AUDIO, 0);
2518 ExpectRead(DemuxerStream::VIDEO, 0);
2521 TEST_P(ChunkDemuxerTest, CodecPrefixMatching) {
2522 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2524 #if defined(USE_PROPRIETARY_CODECS)
2525 expected = ChunkDemuxer::kOk;
2526 #endif
2528 std::vector<std::string> codecs;
2529 codecs.push_back("avc1.4D4041");
2531 EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs,
2532 use_legacy_frame_processor_),
2533 expected);
2536 // Test codec ID's that are not compliant with RFC6381, but have been
2537 // seen in the wild.
2538 TEST_P(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2539 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2541 #if defined(USE_PROPRIETARY_CODECS)
2542 expected = ChunkDemuxer::kOk;
2543 #endif
2544 const char* codec_ids[] = {
2545 // GPAC places leading zeros on the audio object type.
2546 "mp4a.40.02",
2547 "mp4a.40.05"
2550 for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2551 std::vector<std::string> codecs;
2552 codecs.push_back(codec_ids[i]);
2554 ChunkDemuxer::Status result =
2555 demuxer_->AddId("source_id", "audio/mp4", codecs,
2556 use_legacy_frame_processor_);
2558 EXPECT_EQ(result, expected)
2559 << "Fail to add codec_id '" << codec_ids[i] << "'";
2561 if (result == ChunkDemuxer::kOk)
2562 demuxer_->RemoveId("source_id");
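// For context, the strictly RFC 6381 compliant spellings of the IDs above are
// "mp4a.40.2" and "mp4a.40.5"; a sketch of the zero-stripping a parser might
// apply to the audio object type (hypothetical helper, not demuxer code):
//
//   std::string StripLeadingZeros(const std::string& object_type) {
//     size_t first = object_type.find_first_not_of('0');
//     return first == std::string::npos ? "0" : object_type.substr(first);
//   }
//   // StripLeadingZeros("02") -> "2", StripLeadingZeros("05") -> "5"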
2566 TEST_P(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2567 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2569 EXPECT_CALL(host_, SetDuration(_))
2570 .Times(AnyNumber());
2572 base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2573 base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2575 AppendCluster(kDefaultFirstCluster());
2576 AppendCluster(kDefaultSecondCluster());
2577 MarkEndOfStream(PIPELINE_OK);
2579 DemuxerStream::Status status;
2580 base::TimeDelta last_timestamp;
2582 // Verify that we can read audio & video to the end w/o problems.
2583 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2584 EXPECT_EQ(DemuxerStream::kOk, status);
2585 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2587 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2588 EXPECT_EQ(DemuxerStream::kOk, status);
2589 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2591 // Seek back to 0 and verify that we can read to the end again.
2592 Seek(base::TimeDelta::FromMilliseconds(0));
2594 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2595 EXPECT_EQ(DemuxerStream::kOk, status);
2596 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2598 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2599 EXPECT_EQ(DemuxerStream::kOk, status);
2600 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2603 TEST_P(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2604 EXPECT_CALL(*this, DemuxerOpened());
2605 demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2606 ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2607 ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2609 CheckExpectedRanges("audio", "{ }");
2610 CheckExpectedRanges("video", "{ }");
2613 // Test that Seek() completes successfully when the first cluster
2614 // arrives.
2615 TEST_P(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2616 InSequence s;
2618 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2620 AppendCluster(kDefaultFirstCluster());
2622 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2623 demuxer_->StartWaitingForSeek(seek_time);
2625 AppendCluster(kDefaultSecondCluster());
2626 EXPECT_CALL(host_, SetDuration(
2627 base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2628 MarkEndOfStream(PIPELINE_OK);
2630 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2632 GenerateExpectedReads(0, 4);
2633 GenerateExpectedReads(46, 66, 5);
2635 EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2636 end_of_stream_helper.RequestReads();
2637 end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2640 TEST_P(ChunkDemuxerTest, ConfigChange_Video) {
2641 InSequence s;
2643 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2645 DemuxerStream::Status status;
2646 base::TimeDelta last_timestamp;
2648 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2650 // Fetch initial video config and verify it matches what we expect.
2651 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2652 ASSERT_TRUE(video_config_1.IsValidConfig());
2653 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2654 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2656 ExpectRead(DemuxerStream::VIDEO, 0);
2658 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2660 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2661 EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2663 // Fetch the new decoder config.
2664 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2665 ASSERT_TRUE(video_config_2.IsValidConfig());
2666 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2667 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2669 ExpectRead(DemuxerStream::VIDEO, 527);
2671 // Read until the next config change.
2672 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2673 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2674 EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2676 // Get the new config and verify that it matches the first one.
2677 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2679 ExpectRead(DemuxerStream::VIDEO, 801);
2681 // Read until the end of the stream just to make sure there aren't any other
2682 // config changes.
2683 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2684 ASSERT_EQ(status, DemuxerStream::kOk);
2687 TEST_P(ChunkDemuxerTest, ConfigChange_Audio) {
2688 InSequence s;
2690 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2692 DemuxerStream::Status status;
2693 base::TimeDelta last_timestamp;
2695 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2697 // Fetch initial audio config and verify it matches what we expect.
2698 const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2699 ASSERT_TRUE(audio_config_1.IsValidConfig());
2700 EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2701 EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2703 ExpectRead(DemuxerStream::AUDIO, 0);
2705 // The first config change seen is from a splice frame representing an overlap
2706 // of buffers from config 1 by buffers from config 2.
2707 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2708 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2709 EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2711 // Fetch the new decoder config.
2712 const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2713 ASSERT_TRUE(audio_config_2.IsValidConfig());
2714 EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2715 EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2717 // The next config change is from a splice frame representing an overlap of
2718 // buffers from config 2 by buffers from config 1.
2719 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2720 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2721 EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2722 ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2724 // Read until the end of the stream just to make sure there aren't any other
2725 // config changes.
2726 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2727 ASSERT_EQ(status, DemuxerStream::kOk);
2728 EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2731 TEST_P(ChunkDemuxerTest, ConfigChange_Seek) {
2732 InSequence s;
2734 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2736 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2738 // Fetch initial video config and verify it matches what we expect.
2739 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2740 ASSERT_TRUE(video_config_1.IsValidConfig());
2741 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2742 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2744 ExpectRead(DemuxerStream::VIDEO, 0);
2746 // Seek to a location with a different config.
2747 Seek(base::TimeDelta::FromMilliseconds(527));
2749 // Verify that the config change is signalled.
2750 ExpectConfigChanged(DemuxerStream::VIDEO);
2752 // Fetch the new decoder config and verify it is what we expect.
2753 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2754 ASSERT_TRUE(video_config_2.IsValidConfig());
2755 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2756 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2758 // Verify that Read() will return a buffer now.
2759 ExpectRead(DemuxerStream::VIDEO, 527);
2761 // Seek back to the beginning and verify we get another config change.
2762 Seek(base::TimeDelta::FromMilliseconds(0));
2763 ExpectConfigChanged(DemuxerStream::VIDEO);
2764 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2765 ExpectRead(DemuxerStream::VIDEO, 0);
2767 // Seek to a location that requires a config change and then
2768 // seek to a new location that has the same configuration as
2769 // the start of the file without a Read() in the middle.
2770 Seek(base::TimeDelta::FromMilliseconds(527));
2771 Seek(base::TimeDelta::FromMilliseconds(801));
2773 // Verify that no config change is signalled.
2774 ExpectRead(DemuxerStream::VIDEO, 801);
2775 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2778 TEST_P(ChunkDemuxerTest, TimestampPositiveOffset) {
2779 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2781 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2782 AppendCluster(GenerateCluster(0, 2));
2784 Seek(base::TimeDelta::FromMilliseconds(30000));
2786 GenerateExpectedReads(30000, 2);
2789 TEST_P(ChunkDemuxerTest, TimestampNegativeOffset) {
2790 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2792 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2793 AppendCluster(GenerateCluster(1000, 2));
2795 GenerateExpectedReads(0, 2);
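// In other words, the offset shifts the presentation timeline (a sketch of
// the relationship, not demuxer internals):
//
//   presentation_time = media_timestamp + timestamp_offset
//   // e.g. 1000ms media timestamp + (-1000ms) offset -> buffers read at 0ms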
2798 TEST_P(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2799 std::string audio_id = "audio1";
2800 std::string video_id = "video1";
2801 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2803 ASSERT_TRUE(SetTimestampOffset(
2804 audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2805 ASSERT_TRUE(SetTimestampOffset(
2806 video_id, base::TimeDelta::FromMilliseconds(-2500)));
2807 AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2808 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2809 AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2810 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2811 GenerateAudioStreamExpectedReads(0, 4);
2812 GenerateVideoStreamExpectedReads(0, 4);
2814 Seek(base::TimeDelta::FromMilliseconds(27300));
2816 ASSERT_TRUE(SetTimestampOffset(
2817 audio_id, base::TimeDelta::FromMilliseconds(27300)));
2818 ASSERT_TRUE(SetTimestampOffset(
2819 video_id, base::TimeDelta::FromMilliseconds(27300)));
2820 AppendCluster(audio_id, GenerateSingleStreamCluster(
2821 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2822 AppendCluster(video_id, GenerateSingleStreamCluster(
2823 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2824 GenerateVideoStreamExpectedReads(27300, 4);
2825 GenerateAudioStreamExpectedReads(27300, 4);
2828 TEST_P(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
2829 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2831 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
2832 // Append only part of the cluster data.
2833 AppendData(cluster->data(), cluster->size() - 13);
2835 // Confirm we're in the middle of parsing a media segment.
2836 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
2838 demuxer_->Abort(kSourceId,
2839 append_window_start_for_next_append_,
2840 append_window_end_for_next_append_,
2841 &timestamp_offset_map_[kSourceId]);
2843 // After Abort(), parsing should no longer be in the middle of a media
2844 // segment.
2845 ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
2848 #if defined(USE_PROPRIETARY_CODECS)
2849 #if defined(ENABLE_MPEG2TS_STREAM_PARSER)
2850 TEST_P(ChunkDemuxerTest, EmitBuffersDuringAbort) {
2851 EXPECT_CALL(*this, DemuxerOpened());
2852 demuxer_->Initialize(
2853 &host_, CreateInitDoneCB(kInfiniteDuration(), PIPELINE_OK), true);
2854 EXPECT_EQ(ChunkDemuxer::kOk, AddIdForMp2tSource(kSourceId));
2856 // For info:
2857 // DTS/PTS derived using dvbsnoop -s ts -if bear-1280x720.ts -tssubdecode
2858 // Video: first PES:
2859 // PTS: 126912 (0x0001efc0) [= 90 kHz-Timestamp: 0:00:01.4101]
2860 // DTS: 123909 (0x0001e405) [= 90 kHz-Timestamp: 0:00:01.3767]
2861 // Audio: first PES:
2862 // PTS: 126000 (0x0001ec30) [= 90 kHz-Timestamp: 0:00:01.4000]
2863 // DTS: 123910 (0x0001e406) [= 90 kHz-Timestamp: 0:00:01.3767]
2864 // Video: last PES:
2865 // PTS: 370155 (0x0005a5eb) [= 90 kHz-Timestamp: 0:00:04.1128]
2866 // DTS: 367152 (0x00059a30) [= 90 kHz-Timestamp: 0:00:04.0794]
2867 // Audio: last PES:
2868 // PTS: 353788 (0x000565fc) [= 90 kHz-Timestamp: 0:00:03.9309]
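  // (The 90 kHz timestamps above convert as seconds = ticks / 90000.0, e.g.
  //  126912 / 90000 = 1.4101s; a sketch of the equivalent conversion:
  //    base::TimeDelta pts = base::TimeDelta::FromMicroseconds(
  //        ticks * base::Time::kMicrosecondsPerSecond / 90000);)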
2870 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile("bear-1280x720.ts");
2871 AppendData(kSourceId, buffer->data(), buffer->data_size());
2873 // Confirm we're in the middle of parsing a media segment.
2874 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
2876 // Abort on the MPEG-2 TS parser triggers the emission of the last video
2877 // buffer that is still pending in the stream parser.
2878 Ranges<base::TimeDelta> range_before_abort =
2879 demuxer_->GetBufferedRanges(kSourceId);
2880 demuxer_->Abort(kSourceId,
2881 append_window_start_for_next_append_,
2882 append_window_end_for_next_append_,
2883 &timestamp_offset_map_[kSourceId]);
2884 Ranges<base::TimeDelta> range_after_abort =
2885 demuxer_->GetBufferedRanges(kSourceId);
2887 ASSERT_EQ(range_before_abort.size(), 1u);
2888 ASSERT_EQ(range_after_abort.size(), 1u);
2889 EXPECT_EQ(range_after_abort.start(0), range_before_abort.start(0));
2890 EXPECT_GT(range_after_abort.end(0), range_before_abort.end(0));
2892 #endif
2893 #endif
2895 TEST_P(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
2896 // TODO(wolenetz): Also test 'unknown' sized clusters.
2897 // See http://crbug.com/335676.
2898 const uint8 kBuffer[] = {
2899 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
2900 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
2903 // This array indicates the expected return value of IsParsingMediaSegment()
2904 // following each incrementally appended byte in |kBuffer|.
2905 const bool kExpectedReturnValues[] = {
2906 false, false, false, false, true,
2907 true, true, false,
2910 COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
2911 test_arrays_out_of_sync);
2912 COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
2914 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2916 for (size_t i = 0; i < sizeof(kBuffer); i++) {
2917 DVLOG(3) << "Appending and testing index " << i;
2918 AppendData(kBuffer + i, 1);
2919 bool expected_return_value = kExpectedReturnValues[i];
2920 EXPECT_EQ(expected_return_value,
2921 demuxer_->IsParsingMediaSegment(kSourceId));
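// (For reference, the size byte decodes as an EBML vint: 0x83 has its high
//  marker bit set for a one-byte vint, so size = 0x83 & 0x7F = 3. That is why
//  IsParsingMediaSegment() flips to true once the 5th byte (the size) lands
//  and back to false after the 3 payload bytes 0xE7 0x81 0x01 are consumed.)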
2925 TEST_P(ChunkDemuxerTest, DurationChange) {
2926 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2927 const int kStreamDuration = kDefaultDuration().InMilliseconds();
2929 // Add data leading up to the currently set duration.
2930 AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
2931 kStreamDuration - kVideoBlockDuration,
2932 2));
2934 CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
2936 // Add data beginning at the currently set duration and expect a new duration
2937 // to be signaled. Note that the last video block will have a higher end
2938 // timestamp than the last audio block.
2939 if (use_legacy_frame_processor_) {
2940 const int kNewStreamDurationAudio = kStreamDuration + kAudioBlockDuration;
2941 EXPECT_CALL(host_, SetDuration(
2942 base::TimeDelta::FromMilliseconds(kNewStreamDurationAudio)));
2944 const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
2945 EXPECT_CALL(host_, SetDuration(
2946 base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
2947 AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
2949 CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
2951 // Add more data to the end of each media type. Note that the last audio block
2952 // will have a higher end timestamp than the last video block.
2953 const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
2954 EXPECT_CALL(host_, SetDuration(
2955 base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
2956 AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
2957 kStreamDuration + kVideoBlockDuration,
2958 3));
2960 // See that the range has increased appropriately (but not to the full
2961 // duration of 201293, since there is not enough video appended for that).
2962 CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
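// (For reference: 201293 is kStreamDuration + 3 * kAudioBlockDuration =
//  201224 + 69; the buffered range stops at 201290 because, as noted above,
//  the appended video does not reach that far.)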
2965 TEST_P(ChunkDemuxerTest, DurationChangeTimestampOffset) {
2966 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2968 ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
2970 if (use_legacy_frame_processor_) {
2971 EXPECT_CALL(host_, SetDuration(
2972 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
2973 kAudioBlockDuration * 2)));
2975 EXPECT_CALL(host_, SetDuration(
2976 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
2977 kVideoBlockDuration * 2)));
2978 AppendCluster(GenerateCluster(0, 4));
2981 TEST_P(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
2982 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2984 AppendCluster(kDefaultFirstCluster());
2986 EXPECT_CALL(host_, SetDuration(
2987 base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
2988 MarkEndOfStream(PIPELINE_OK);
2992 TEST_P(ChunkDemuxerTest, ZeroLengthAppend) {
2993 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2994 AppendData(NULL, 0);
2997 TEST_P(ChunkDemuxerTest, AppendAfterEndOfStream) {
2998 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3000 EXPECT_CALL(host_, SetDuration(_))
3001 .Times(AnyNumber());
3003 AppendCluster(kDefaultFirstCluster());
3004 MarkEndOfStream(PIPELINE_OK);
3006 demuxer_->UnmarkEndOfStream();
3008 AppendCluster(kDefaultSecondCluster());
3009 MarkEndOfStream(PIPELINE_OK);
3012 // Test receiving a Shutdown() call before we get an Initialize()
3013 // call. This can happen if video element gets destroyed before
3014 // the pipeline has a chance to initialize the demuxer.
3015 TEST_P(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
3016 demuxer_->Shutdown();
3017 demuxer_->Initialize(
3018 &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
3019 message_loop_.RunUntilIdle();
3022 // Verifies that signaling end of stream while stalled at a gap
3023 // boundary does not trigger end of stream buffers to be returned.
3024 TEST_P(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
3025 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3027 AppendCluster(0, 10);
3028 AppendCluster(300, 10);
3029 CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
3031 GenerateExpectedReads(0, 10);
3033 bool audio_read_done = false;
3034 bool video_read_done = false;
3035 ReadAudio(base::Bind(&OnReadDone,
3036 base::TimeDelta::FromMilliseconds(138),
3037 &audio_read_done));
3038 ReadVideo(base::Bind(&OnReadDone,
3039 base::TimeDelta::FromMilliseconds(138),
3040 &video_read_done));
3042 // Verify that the reads didn't complete.
3043 EXPECT_FALSE(audio_read_done);
3044 EXPECT_FALSE(video_read_done);
3046 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
3047 MarkEndOfStream(PIPELINE_OK);
3049 // Verify that the reads still haven't completed.
3050 EXPECT_FALSE(audio_read_done);
3051 EXPECT_FALSE(video_read_done);
3053 demuxer_->UnmarkEndOfStream();
3055 AppendCluster(138, 22);
3057 message_loop_.RunUntilIdle();
3059 CheckExpectedRanges(kSourceId, "{ [0,435) }");
3061 // Verify that the reads have completed.
3062 EXPECT_TRUE(audio_read_done);
3063 EXPECT_TRUE(video_read_done);
3065 // Read the rest of the buffers.
3066 GenerateExpectedReads(161, 171, 20);
3068 // Verify that reads block because the append cleared the end of stream state.
3069 audio_read_done = false;
3070 video_read_done = false;
3071 ReadAudio(base::Bind(&OnReadDone_EOSExpected,
3072 &audio_read_done));
3073 ReadVideo(base::Bind(&OnReadDone_EOSExpected,
3074 &video_read_done));
3076 // Verify that the reads don't complete.
3077 EXPECT_FALSE(audio_read_done);
3078 EXPECT_FALSE(video_read_done);
3080 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
3081 MarkEndOfStream(PIPELINE_OK);
3083 EXPECT_TRUE(audio_read_done);
3084 EXPECT_TRUE(video_read_done);
3087 TEST_P(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
3088 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3090 // Cancel preroll.
3091 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
3092 demuxer_->CancelPendingSeek(seek_time);
3094 // Initiate the seek to the new location.
3095 Seek(seek_time);
3097 // Append data to satisfy the seek.
3098 AppendCluster(seek_time.InMilliseconds(), 10);
3101 TEST_P(ChunkDemuxerTest, GCDuringSeek) {
3102 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3104 demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
3106 base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
3107 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
3109 // Initiate a seek to |seek_time1|.
3110 Seek(seek_time1);
3112 // Append data to satisfy the first seek request.
3113 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3114 seek_time1.InMilliseconds(), 5);
3115 CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
3117 // Signal that the second seek is starting.
3118 demuxer_->StartWaitingForSeek(seek_time2);
3120 // Append data to satisfy the second seek. This append triggers
3121 // the garbage collection logic since we set the memory limit to
3122 // 5 blocks.
3123 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3124 seek_time2.InMilliseconds(), 5);
3126 // Verify that the buffers that cover |seek_time2| do not get
3127 // garbage collected.
3128 CheckExpectedRanges(kSourceId, "{ [500,615) }");
3130 // Complete the seek.
3131 demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
3134 // Append more data and make sure that the blocks for |seek_time2|
3135 // don't get removed.
3137 // NOTE: The current GC algorithm tries to preserve the GOP at the
3138 // current position as well as the last appended GOP. This is
3139 // why there are 2 ranges in the expectations.
3140 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
3141 CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
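// (The ranges above are consistent with the 5 * kBlockSize limit set earlier:
//  each appended cluster holds 5 audio blocks of kAudioBlockDuration = 23ms,
//  e.g. [500,615) spans 5 * 23 = 115ms, and after the final append only the
//  GOP at the playback position plus the last appended GOP survive.)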
3144 TEST_P(ChunkDemuxerTest, AppendWindow_Video) {
3145 ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
3146 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3148 // Set the append window to [50,280).
3149 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3150 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3152 // Append a cluster that starts before and ends after the append window.
3153 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3154 "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3156 // Verify that GOPs that start outside the window are not included
3157 // in the buffer. Also verify that buffers that start inside the
3158 // window and extend beyond the end of the window are not included.
3159 CheckExpectedRanges(kSourceId, "{ [120,270) }");
3160 CheckExpectedBuffers(stream, "120 150 180 210 240");
3162 // Extend the append window to [50,650).
3163 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3165 // Append more data and verify that the newly appended buffers start at the
3166 // next keyframe.
3167 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3168 "360 390 420K 450 480 510 540K 570 600 630K");
3169 CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3172 TEST_P(ChunkDemuxerTest, AppendWindow_Audio) {
3173 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3174 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3176 // Set the append window to [50,280).
3177 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3178 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3180 // Append a cluster that starts before and ends after the append window.
3181 AppendSingleStreamCluster(
3182 kSourceId, kAudioTrackNum,
3183 "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
3185 // Verify that frames that end outside the window are not included
3186 // in the buffer. Also verify that buffers that start inside the
3187 // window and extend beyond the end of the window are not included.
3189 // The first 50ms of the range should be truncated since it overlaps
3190 // the start of the append window.
3191 CheckExpectedRanges(kSourceId, "{ [50,270) }");
3193 // The "50P" buffer is the "0" buffer marked for complete discard. The next
3194 // "50" buffer is the "30" buffer marked with 20ms of start discard.
3195 CheckExpectedBuffers(stream, "50P 50 60 90 120 150 180 210 240");
3197 // Extend the append window to [50,650).
3198 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3200 // Append more data and verify that a new range is created.
3201 AppendSingleStreamCluster(
3202 kSourceId, kAudioTrackNum,
3203 "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
3204 CheckExpectedRanges(kSourceId, "{ [50,270) [360,630) }");
3207 TEST_P(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
3208 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3210 // Set the append window to [10,20).
3211 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
3212 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3214 // Append a cluster that starts before and ends after the append window.
3215 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
3217 // Verify that everything is dropped in this case. No partial append should
3218 // be generated.
3219 CheckExpectedRanges(kSourceId, "{ }");
3222 TEST_P(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
3223 EXPECT_CALL(*this, DemuxerOpened());
3224 demuxer_->Initialize(
3225 &host_,
3226 CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3227 true);
3228 ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3230 // Set the append window to [50,150).
3231 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(50);
3232 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(150);
3234 // Read a WebM file into memory and send the data to the demuxer. The chunk
3235 // size has been chosen carefully to ensure the preroll buffer used by the
3236 // partial append window trim must come from a previous Append() call.
3237 scoped_refptr<DecoderBuffer> buffer =
3238 ReadTestDataFile("bear-320x240-audio-only.webm");
3239 AppendDataInPieces(buffer->data(), buffer->data_size(), 128);
3241 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3242 CheckExpectedBuffers(stream, "50P 50 62 86 109 122 125 128");
3243 }
3245 TEST_P(ChunkDemuxerTest, AppendWindow_AudioConfigUpdateRemovesPreroll) {
3246 EXPECT_CALL(*this, DemuxerOpened());
3247 demuxer_->Initialize(
3248 &host_,
3249 CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744), PIPELINE_OK),
3250 true);
3251 ASSERT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO));
3253 // Set the append window such that the first file is completely before the
3254 // append window.
3255 // TODO(wolenetz/acolwell): Update this duration once the files are fixed to
3256 // have the correct duration in their init segments, and the
3257 // CreateInitDoneCB() call, above, is fixed to use that duration. See
3258 // http://crbug.com/354284.
3259 const base::TimeDelta duration_1 = base::TimeDelta::FromMilliseconds(2746);
3260 append_window_start_for_next_append_ = duration_1;
3262 // Read a WebM file into memory and append the data.
3263 scoped_refptr<DecoderBuffer> buffer =
3264 ReadTestDataFile("bear-320x240-audio-only.webm");
3265 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
3266 CheckExpectedRanges(kSourceId, "{ }");
3268 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3269 AudioDecoderConfig config_1 = stream->audio_decoder_config();
3271 // Read in a second WebM with a different config and append the data.
3272 scoped_refptr<DecoderBuffer> buffer2 =
3273 ReadTestDataFile("bear-320x240-audio-only-48khz.webm");
3274 EXPECT_CALL(host_, SetDuration(_)).Times(AnyNumber());
3275 ASSERT_TRUE(SetTimestampOffset(kSourceId, duration_1));
3276 AppendDataInPieces(buffer2->data(), buffer2->data_size(), 512);
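// duration_1 (2746ms) is both the append window start and the timestamp
// offset, so the second file begins exactly at the window start and is
// buffered from 2746ms onward.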
3277 CheckExpectedRanges(kSourceId, "{ [2746,5519) }");
3279 Seek(duration_1);
3280 ExpectConfigChanged(DemuxerStream::AUDIO);
3281 ASSERT_FALSE(config_1.Matches(stream->audio_decoder_config()));
3282 CheckExpectedBuffers(stream, "2746 2767 2789 2810");
3283 }
3285 TEST_P(ChunkDemuxerTest, AppendWindow_Text) {
3286 DemuxerStream* text_stream = NULL;
3287 EXPECT_CALL(host_, AddTextStream(_, _))
3288 .WillOnce(SaveArg<0>(&text_stream));
3289 ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
3290 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3292 // Set the append window to [20,280).
3293 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3294 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3296 // Append a cluster that starts before and ends after the append
3297 // window.
3298 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3299 "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3300 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");
3302 // Verify that text cues that start outside the window are not included
3303 // in the buffer. Also verify that cues that extend beyond the
3304 // window are not included.
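// With the window at [20,280), the text cue at 0 starts before the window
// and the cues at 200 and 300 reach or start past the window end, so only
// the cue at 100 is kept. The reported range matches the video-only case
// above.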
3305 CheckExpectedRanges(kSourceId, "{ [120,270) }");
3306 CheckExpectedBuffers(video_stream, "120 150 180 210 240");
3307 CheckExpectedBuffers(text_stream, "100");
3309 // Extend the append window to [20,650).
3310 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3312 // Append more data and verify that a new range is created.
3313 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3314 "360 390 420K 450 480 510 540K 570 600 630K");
3315 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
3316 CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3318 // Seek to the new range and verify that the expected buffers are returned.
3319 Seek(base::TimeDelta::FromMilliseconds(420));
3320 CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
3321 CheckExpectedBuffers(text_stream, "400 500");
3322 }
3324 TEST_P(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
3325 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
3326 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
3327 AppendGarbage();
3328 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
3329 demuxer_->StartWaitingForSeek(seek_time);
3330 }
3332 TEST_P(ChunkDemuxerTest, Remove_AudioVideoText) {
3333 DemuxerStream* text_stream = NULL;
3334 EXPECT_CALL(host_, AddTextStream(_, _))
3335 .WillOnce(SaveArg<0>(&text_stream));
3336 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3338 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3339 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3341 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3342 "0K 20K 40K 60K 80K 100K 120K 140K");
3343 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3344 "0K 30 60 90 120K 150 180");
3345 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");
3347 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3348 CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
3349 CheckExpectedBuffers(text_stream, "0 100 200");
3351 // Remove the buffers that were added.
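// A single Remove() on the source id applies to the audio, video and text
// streams alike.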
3352 demuxer_->Remove(kSourceId, base::TimeDelta(),
3353 base::TimeDelta::FromMilliseconds(300));
3355 // Verify that all the appended data has been removed.
3356 CheckExpectedRanges(kSourceId, "{ }");
3358 // Append new buffers that are clearly different than the original
3359 // ones and verify that only the new buffers are returned.
3360 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3361 "1K 21K 41K 61K 81K 101K 121K 141K");
3362 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3363 "1K 31 61 91 121K 151 181");
3364 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");
3366 Seek(base::TimeDelta());
3367 CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
3368 CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
3369 CheckExpectedBuffers(text_stream, "1 101 201");
3370 }
3372 TEST_P(ChunkDemuxerTest, Remove_StartAtDuration) {
3373 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3374 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3376 // Set the duration to something small so that the append that
3377 // follows updates the duration to reflect the end of the appended data.
3378 EXPECT_CALL(host_, SetDuration(
3379 base::TimeDelta::FromMilliseconds(1)));
3380 demuxer_->SetDuration(0.001);
3382 EXPECT_CALL(host_, SetDuration(
3383 base::TimeDelta::FromMilliseconds(160)));
3384 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3385 "0K 20K 40K 60K 80K 100K 120K 140K");
3387 CheckExpectedRanges(kSourceId, "{ [0,160) }");
3388 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3390 demuxer_->Remove(kSourceId,
3391 base::TimeDelta::FromSecondsD(demuxer_->GetDuration()),
3392 kInfiniteDuration());
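// Removing [duration, +infinity) covers no buffered data, so the original
// range and buffers should remain intact.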
3394 Seek(base::TimeDelta());
3395 CheckExpectedRanges(kSourceId, "{ [0,160) }");
3396 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3397 }
3399 // Verifies that a Seek() will complete without text cues for
3400 // the seek point and will return cues after the seek position
3401 // when they are eventually appended.
3402 TEST_P(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
3403 DemuxerStream* text_stream = NULL;
3404 EXPECT_CALL(host_, AddTextStream(_, _))
3405 .WillOnce(SaveArg<0>(&text_stream));
3406 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3408 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3409 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3411 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
3412 bool seek_cb_was_called = false;
3413 demuxer_->StartWaitingForSeek(seek_time);
3414 demuxer_->Seek(seek_time,
3415 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
3416 message_loop_.RunUntilIdle();
3418 EXPECT_FALSE(seek_cb_was_called);
3420 bool text_read_done = false;
3421 text_stream->Read(base::Bind(&OnReadDone,
3422 base::TimeDelta::FromMilliseconds(125),
3423 &text_read_done));
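// This read stays pending because no text cue at or after the seek time has
// been appended yet. OnReadDone() flags |text_read_done| once a buffer with
// the expected 125ms timestamp is delivered.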
3425 // Append audio & video data so the seek completes.
3426 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3427 "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
3428 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3429 "0K 30 60 90 120K 150 180 210");
3431 message_loop_.RunUntilIdle();
3432 EXPECT_TRUE(seek_cb_was_called);
3433 EXPECT_FALSE(text_read_done);
3435 // Read some audio & video buffers to further verify seek completion.
3436 CheckExpectedBuffers(audio_stream, "120 140");
3437 CheckExpectedBuffers(video_stream, "120 150");
3439 EXPECT_FALSE(text_read_done);
3441 // Append text cues that start after the seek point and verify that
3442 // they are returned by Read() calls.
3443 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");
3445 message_loop_.RunUntilIdle();
3446 EXPECT_TRUE(text_read_done);
3448 // NOTE: we start at 175 here because the buffer at 125 was returned
3449 // to the pending read initiated above.
3450 CheckExpectedBuffers(text_stream, "175 225");
3452 // Verify that audio & video streams continue to return expected values.
3453 CheckExpectedBuffers(audio_stream, "160 180");
3454 CheckExpectedBuffers(video_stream, "180 210");
3455 }
3457 // Generate two sets of tests: one using FrameProcessor, and one using
3458 // LegacyFrameProcessor.
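// The bool test parameter selects the frame processor: false exercises the
// new FrameProcessor, true exercises the LegacyFrameProcessor.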
3459 INSTANTIATE_TEST_CASE_P(NewFrameProcessor, ChunkDemuxerTest, Values(false));
3460 INSTANTIATE_TEST_CASE_P(LegacyFrameProcessor, ChunkDemuxerTest, Values(true));
3462 } // namespace media