media/filters/chunk_demuxer_unittest.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include <algorithm>
7 #include "base/bind.h"
8 #include "base/message_loop/message_loop.h"
9 #include "base/strings/string_number_conversions.h"
10 #include "base/strings/string_split.h"
11 #include "base/strings/string_util.h"
12 #include "media/base/audio_decoder_config.h"
13 #include "media/base/decoder_buffer.h"
14 #include "media/base/decrypt_config.h"
15 #include "media/base/mock_demuxer_host.h"
16 #include "media/base/test_data_util.h"
17 #include "media/base/test_helpers.h"
18 #include "media/filters/chunk_demuxer.h"
19 #include "media/formats/webm/cluster_builder.h"
20 #include "media/formats/webm/webm_constants.h"
21 #include "media/formats/webm/webm_crypto_helpers.h"
22 #include "testing/gtest/include/gtest/gtest.h"
24 using ::testing::AnyNumber;
25 using ::testing::Exactly;
26 using ::testing::InSequence;
27 using ::testing::NotNull;
28 using ::testing::Return;
29 using ::testing::SaveArg;
30 using ::testing::SetArgumentPointee;
31 using ::testing::Values;
32 using ::testing::_;
34 namespace media {
36 const uint8 kTracksHeader[] = {
37 0x16, 0x54, 0xAE, 0x6B, // Tracks ID
38 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // tracks(size = 0)
41 // WebM Block bytes that represent a VP8 keyframe.
42 const uint8 kVP8Keyframe[] = {
43 0x010, 0x00, 0x00, 0x9d, 0x01, 0x2a, 0x00, 0x10, 0x00, 0x10, 0x00
46 // WebM Block bytes that represent a VP8 interframe.
47 const uint8 kVP8Interframe[] = { 0x11, 0x00, 0x00 };
49 const int kTracksHeaderSize = sizeof(kTracksHeader);
50 const int kTracksSizeOffset = 4;
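// The 8-byte size field that begins at kTracksSizeOffset inside kTracksHeader
// is only a placeholder; CreateInitSegment() below overwrites it via
// WriteInt64() once the total size of the appended TrackEntry data is known.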
52 // The size field of the TrackEntry element in the test file
53 // "webm_vorbis_track_entry" starts at index 1 and spans 8 bytes.
54 const int kAudioTrackSizeOffset = 1;
55 const int kAudioTrackSizeWidth = 8;
56 const int kAudioTrackEntryHeaderSize =
57 kAudioTrackSizeOffset + kAudioTrackSizeWidth;
59 // The size field of the TrackEntry element in the test file
60 // "webm_vp8_track_entry" starts at index 1 and spans 8 bytes.
61 const int kVideoTrackSizeOffset = 1;
62 const int kVideoTrackSizeWidth = 8;
63 const int kVideoTrackEntryHeaderSize =
64 kVideoTrackSizeOffset + kVideoTrackSizeWidth;
66 const int kVideoTrackNum = 1;
67 const int kAudioTrackNum = 2;
68 const int kTextTrackNum = 3;
70 const int kAudioBlockDuration = 23;
71 const int kVideoBlockDuration = 33;
72 const int kTextBlockDuration = 100;
73 const int kBlockSize = 10;
75 const char kSourceId[] = "SourceId";
76 const char kDefaultFirstClusterRange[] = "{ [0,46) }";
77 const int kDefaultFirstClusterEndTimestamp = 66;
78 const int kDefaultSecondClusterEndTimestamp = 132;
80 base::TimeDelta kDefaultDuration() {
81 return base::TimeDelta::FromMilliseconds(201224);
84 // Write an integer into buffer in the form of vint that spans 8 bytes.
85 // The data pointed to by |buffer| should be at least 8 bytes long.
86 // |number| should be in the range 0 <= number < 0x00FFFFFFFFFFFFFF.
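// Illustrative example (not from the original file): WriteInt64(buf, 0x1234)
// writes { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x34 }, i.e. a 0x01 marker
// byte followed by the value in big-endian order across the remaining 7 bytes.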
87 static void WriteInt64(uint8* buffer, int64 number) {
88 DCHECK(number >= 0 && number < 0x00FFFFFFFFFFFFFFLL);
89 buffer[0] = 0x01;
90 int64 tmp = number;
91 for (int i = 7; i > 0; i--) {
92 buffer[i] = tmp & 0xff;
93 tmp >>= 8;
97 MATCHER_P(HasTimestamp, timestamp_in_ms, "") {
98 return arg.get() && !arg->end_of_stream() &&
99 arg->timestamp().InMilliseconds() == timestamp_in_ms;
102 MATCHER(IsEndOfStream, "") { return arg.get() && arg->end_of_stream(); }
104 static void OnReadDone(const base::TimeDelta& expected_time,
105 bool* called,
106 DemuxerStream::Status status,
107 const scoped_refptr<DecoderBuffer>& buffer) {
108 EXPECT_EQ(status, DemuxerStream::kOk);
109 EXPECT_EQ(expected_time, buffer->timestamp());
110 *called = true;
113 static void OnReadDone_AbortExpected(
114 bool* called, DemuxerStream::Status status,
115 const scoped_refptr<DecoderBuffer>& buffer) {
116 EXPECT_EQ(status, DemuxerStream::kAborted);
117 EXPECT_EQ(NULL, buffer.get());
118 *called = true;
121 static void OnReadDone_EOSExpected(bool* called,
122 DemuxerStream::Status status,
123 const scoped_refptr<DecoderBuffer>& buffer) {
124 EXPECT_EQ(status, DemuxerStream::kOk);
125 EXPECT_TRUE(buffer->end_of_stream());
126 *called = true;
129 static void OnSeekDone_OKExpected(bool* called, PipelineStatus status) {
130 EXPECT_EQ(status, PIPELINE_OK);
131 *called = true;
134 static void LogFunc(const std::string& str) { DVLOG(1) << str; }
136 // Test parameter determines which coded frame processor is used to process
137 // appended data. If true, LegacyFrameProcessor is used. Otherwise, a more
138 // spec-compliant frame processor (not yet supported) is used.
139 // TODO(wolenetz): Enable usage of new frame processor based on this flag.
140 // See http://crbug.com/249422.
141 class ChunkDemuxerTest : public ::testing::TestWithParam<bool> {
142 protected:
143 enum CodecsIndex {
144 AUDIO,
145 VIDEO,
146 MAX_CODECS_INDEX
149 // Default cluster to append first for simple tests.
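// Appending it yields the buffered range kDefaultFirstClusterRange,
// i.e. "{ [0,46) }".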
150 scoped_ptr<Cluster> kDefaultFirstCluster() {
151 return GenerateCluster(0, 4);
154 // Default cluster to append after kDefaultFirstCluster()
155 // has been appended. This cluster starts with blocks that
156 // have timestamps consistent with the end times of the blocks
157 // in kDefaultFirstCluster() so that these two clusters represent
158 // a continuous region.
159 scoped_ptr<Cluster> kDefaultSecondCluster() {
160 return GenerateCluster(46, 66, 5);
163 ChunkDemuxerTest()
164 : append_window_end_for_next_append_(kInfiniteDuration()) {
165 use_legacy_frame_processor_ = GetParam();
166 CreateNewDemuxer();
169 void CreateNewDemuxer() {
170 base::Closure open_cb =
171 base::Bind(&ChunkDemuxerTest::DemuxerOpened, base::Unretained(this));
172 Demuxer::NeedKeyCB need_key_cb =
173 base::Bind(&ChunkDemuxerTest::DemuxerNeedKey, base::Unretained(this));
174 demuxer_.reset(
175 new ChunkDemuxer(open_cb, need_key_cb, base::Bind(&LogFunc), true));
178 virtual ~ChunkDemuxerTest() {
179 ShutdownDemuxer();
182 void CreateInitSegment(int stream_flags,
183 bool is_audio_encrypted, bool is_video_encrypted,
184 scoped_ptr<uint8[]>* buffer,
185 int* size) {
186 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
187 bool has_video = (stream_flags & HAS_VIDEO) != 0;
188 bool has_text = (stream_flags & HAS_TEXT) != 0;
189 scoped_refptr<DecoderBuffer> ebml_header;
190 scoped_refptr<DecoderBuffer> info;
191 scoped_refptr<DecoderBuffer> audio_track_entry;
192 scoped_refptr<DecoderBuffer> video_track_entry;
193 scoped_refptr<DecoderBuffer> audio_content_encodings;
194 scoped_refptr<DecoderBuffer> video_content_encodings;
195 scoped_refptr<DecoderBuffer> text_track_entry;
197 ebml_header = ReadTestDataFile("webm_ebml_element");
199 info = ReadTestDataFile("webm_info_element");
201 int tracks_element_size = 0;
203 if (has_audio) {
204 audio_track_entry = ReadTestDataFile("webm_vorbis_track_entry");
205 tracks_element_size += audio_track_entry->data_size();
206 if (is_audio_encrypted) {
207 audio_content_encodings = ReadTestDataFile("webm_content_encodings");
208 tracks_element_size += audio_content_encodings->data_size();
212 if (has_video) {
213 video_track_entry = ReadTestDataFile("webm_vp8_track_entry");
214 tracks_element_size += video_track_entry->data_size();
215 if (is_video_encrypted) {
216 video_content_encodings = ReadTestDataFile("webm_content_encodings");
217 tracks_element_size += video_content_encodings->data_size();
221 if (has_text) {
222 // TODO(matthewjheaney): create an abstraction to do
223 // this (http://crbug/321454).
224 // We need it to also handle the creation of multiple text tracks.
226 // This is the track entry for a text track,
227 // TrackEntry [AE], size=30
228 // TrackNum [D7], size=1, val=3
229 // TrackUID [73] [C5], size=1, value=3
230 // TrackType [83], size=1, val=0x11
231 // CodecId [86], size=18, val="D_WEBVTT/SUBTITLES"
232 const char str[] = "\xAE\x9E\xD7\x81\x03\x73\xC5\x81\x03"
233 "\x83\x81\x11\x86\x92"
234 "D_WEBVTT/SUBTITLES";
235 const int len = strlen(str);
236 DCHECK_EQ(len, 32);
237 const uint8* const buf = reinterpret_cast<const uint8*>(str);
238 text_track_entry = DecoderBuffer::CopyFrom(buf, len);
239 tracks_element_size += text_track_entry->data_size();
242 *size = ebml_header->data_size() + info->data_size() +
243 kTracksHeaderSize + tracks_element_size;
245 buffer->reset(new uint8[*size]);
247 uint8* buf = buffer->get();
248 memcpy(buf, ebml_header->data(), ebml_header->data_size());
249 buf += ebml_header->data_size();
251 memcpy(buf, info->data(), info->data_size());
252 buf += info->data_size();
254 memcpy(buf, kTracksHeader, kTracksHeaderSize);
255 WriteInt64(buf + kTracksSizeOffset, tracks_element_size);
256 buf += kTracksHeaderSize;
258 // TODO(xhwang): Simplify this! Probably have test data files that contain
259 // ContentEncodings directly instead of trying to create one at run-time.
260 if (has_audio) {
261 memcpy(buf, audio_track_entry->data(),
262 audio_track_entry->data_size());
263 if (is_audio_encrypted) {
264 memcpy(buf + audio_track_entry->data_size(),
265 audio_content_encodings->data(),
266 audio_content_encodings->data_size());
267 WriteInt64(buf + kAudioTrackSizeOffset,
268 audio_track_entry->data_size() +
269 audio_content_encodings->data_size() -
270 kAudioTrackEntryHeaderSize);
271 buf += audio_content_encodings->data_size();
273 buf += audio_track_entry->data_size();
276 if (has_video) {
277 memcpy(buf, video_track_entry->data(),
278 video_track_entry->data_size());
279 if (is_video_encrypted) {
280 memcpy(buf + video_track_entry->data_size(),
281 video_content_encodings->data(),
282 video_content_encodings->data_size());
283 WriteInt64(buf + kVideoTrackSizeOffset,
284 video_track_entry->data_size() +
285 video_content_encodings->data_size() -
286 kVideoTrackEntryHeaderSize);
287 buf += video_content_encodings->data_size();
289 buf += video_track_entry->data_size();
292 if (has_text) {
293 memcpy(buf, text_track_entry->data(),
294 text_track_entry->data_size());
295 buf += text_track_entry->data_size();
299 ChunkDemuxer::Status AddId() {
300 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
303 ChunkDemuxer::Status AddId(const std::string& source_id, int stream_flags) {
304 bool has_audio = (stream_flags & HAS_AUDIO) != 0;
305 bool has_video = (stream_flags & HAS_VIDEO) != 0;
306 std::vector<std::string> codecs;
307 std::string type;
309 if (has_audio) {
310 codecs.push_back("vorbis");
311 type = "audio/webm";
314 if (has_video) {
315 codecs.push_back("vp8");
316 type = "video/webm";
319 if (!has_audio && !has_video) {
320 return AddId(kSourceId, HAS_AUDIO | HAS_VIDEO);
323 return demuxer_->AddId(source_id, type, codecs,
324 use_legacy_frame_processor_);
327 void AppendData(const uint8* data, size_t length) {
328 AppendData(kSourceId, data, length);
331 void AppendCluster(const std::string& source_id,
332 scoped_ptr<Cluster> cluster) {
333 AppendData(source_id, cluster->data(), cluster->size());
336 void AppendCluster(scoped_ptr<Cluster> cluster) {
337 AppendCluster(kSourceId, cluster.Pass());
340 void AppendCluster(int timecode, int block_count) {
341 AppendCluster(GenerateCluster(timecode, block_count));
344 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
345 int timecode, int block_count) {
346 int block_duration = 0;
347 switch (track_number) {
348 case kVideoTrackNum:
349 block_duration = kVideoBlockDuration;
350 break;
351 case kAudioTrackNum:
352 block_duration = kAudioBlockDuration;
353 break;
354 case kTextTrackNum:
355 block_duration = kTextBlockDuration;
356 break;
358 ASSERT_NE(block_duration, 0);
359 int end_timecode = timecode + block_count * block_duration;
360 AppendCluster(source_id,
361 GenerateSingleStreamCluster(
362 timecode, end_timecode, track_number, block_duration));
365 // |cluster_description| - A space delimited string of buffer info that
366 // is used to construct a cluster. Each buffer info is a timestamp in
367 // milliseconds and optionally followed by a 'K' to indicate that a buffer
368 // should be marked as a keyframe. For example "0K 30 60" should construct
369 // a cluster with 3 blocks: a keyframe with timestamp 0 and 2 non-keyframes
370 // at 30ms and 60ms.
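// For a concrete use in this file, see e.g. the EndOfStreamRangeChanges test,
// which calls AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33").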
371 void AppendSingleStreamCluster(const std::string& source_id, int track_number,
372 const std::string& cluster_description) {
373 std::vector<std::string> timestamps;
374 base::SplitString(cluster_description, ' ', &timestamps);
376 ClusterBuilder cb;
377 std::vector<uint8> data(10);
378 for (size_t i = 0; i < timestamps.size(); ++i) {
379 std::string timestamp_str = timestamps[i];
380 int block_flags = 0;
381 if (EndsWith(timestamp_str, "K", true)) {
382 block_flags = kWebMFlagKeyframe;
383 // Remove the "K" off of the token.
384 timestamp_str = timestamp_str.substr(0, timestamps[i].length() - 1);
386 int timestamp_in_ms;
387 CHECK(base::StringToInt(timestamp_str, &timestamp_in_ms));
389 if (i == 0)
390 cb.SetClusterTimecode(timestamp_in_ms);
392 if (track_number == kTextTrackNum) {
393 cb.AddBlockGroup(track_number, timestamp_in_ms, kTextBlockDuration,
394 block_flags, &data[0], data.size());
395 } else {
396 cb.AddSimpleBlock(track_number, timestamp_in_ms, block_flags,
397 &data[0], data.size());
400 AppendCluster(source_id, cb.Finish());
403 void AppendData(const std::string& source_id,
404 const uint8* data, size_t length) {
405 EXPECT_CALL(host_, AddBufferedTimeRange(_, _)).Times(AnyNumber());
407 // TODO(wolenetz): Test timestamp offset updating once "sequence" append
408 // mode processing is implemented. See http://crbug.com/249422.
409 demuxer_->AppendData(source_id, data, length,
410 append_window_start_for_next_append_,
411 append_window_end_for_next_append_,
412 &timestamp_offset_map_[source_id]);
415 void AppendDataInPieces(const uint8* data, size_t length) {
416 AppendDataInPieces(data, length, 7);
419 void AppendDataInPieces(const uint8* data, size_t length, size_t piece_size) {
420 const uint8* start = data;
421 const uint8* end = data + length;
422 while (start < end) {
423 size_t append_size = std::min(piece_size,
424 static_cast<size_t>(end - start));
425 AppendData(start, append_size);
426 start += append_size;
430 void AppendInitSegment(int stream_flags) {
431 AppendInitSegmentWithSourceId(kSourceId, stream_flags);
434 void AppendInitSegmentWithSourceId(const std::string& source_id,
435 int stream_flags) {
436 AppendInitSegmentWithEncryptedInfo(source_id, stream_flags, false, false);
439 void AppendInitSegmentWithEncryptedInfo(const std::string& source_id,
440 int stream_flags,
441 bool is_audio_encrypted,
442 bool is_video_encrypted) {
443 scoped_ptr<uint8[]> info_tracks;
444 int info_tracks_size = 0;
445 CreateInitSegment(stream_flags,
446 is_audio_encrypted, is_video_encrypted,
447 &info_tracks, &info_tracks_size);
448 AppendData(source_id, info_tracks.get(), info_tracks_size);
451 void AppendGarbage() {
452 // Fill up an array with gibberish.
453 int garbage_cluster_size = 10;
454 scoped_ptr<uint8[]> garbage_cluster(new uint8[garbage_cluster_size]);
455 for (int i = 0; i < garbage_cluster_size; ++i)
456 garbage_cluster[i] = i;
457 AppendData(garbage_cluster.get(), garbage_cluster_size);
460 void InitDoneCalled(PipelineStatus expected_status,
461 PipelineStatus status) {
462 EXPECT_EQ(status, expected_status);
465 void AppendEmptyCluster(int timecode) {
466 AppendCluster(GenerateEmptyCluster(timecode));
469 PipelineStatusCB CreateInitDoneCB(const base::TimeDelta& expected_duration,
470 PipelineStatus expected_status) {
471 if (expected_duration != kNoTimestamp())
472 EXPECT_CALL(host_, SetDuration(expected_duration));
473 return CreateInitDoneCB(expected_status);
476 PipelineStatusCB CreateInitDoneCB(PipelineStatus expected_status) {
477 return base::Bind(&ChunkDemuxerTest::InitDoneCalled,
478 base::Unretained(this),
479 expected_status);
482 enum StreamFlags {
483 HAS_AUDIO = 1 << 0,
484 HAS_VIDEO = 1 << 1,
485 HAS_TEXT = 1 << 2
488 bool InitDemuxer(int stream_flags) {
489 return InitDemuxerWithEncryptionInfo(stream_flags, false, false);
492 bool InitDemuxerWithEncryptionInfo(
493 int stream_flags, bool is_audio_encrypted, bool is_video_encrypted) {
495 PipelineStatus expected_status =
496 (stream_flags != 0) ? PIPELINE_OK : DEMUXER_ERROR_COULD_NOT_OPEN;
498 base::TimeDelta expected_duration = kNoTimestamp();
499 if (expected_status == PIPELINE_OK)
500 expected_duration = kDefaultDuration();
502 EXPECT_CALL(*this, DemuxerOpened());
503 demuxer_->Initialize(
504 &host_, CreateInitDoneCB(expected_duration, expected_status), true);
506 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
507 return false;
509 AppendInitSegmentWithEncryptedInfo(
510 kSourceId, stream_flags,
511 is_audio_encrypted, is_video_encrypted);
512 return true;
515 bool InitDemuxerAudioAndVideoSourcesText(const std::string& audio_id,
516 const std::string& video_id,
517 bool has_text) {
518 EXPECT_CALL(*this, DemuxerOpened());
519 demuxer_->Initialize(
520 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
522 if (AddId(audio_id, HAS_AUDIO) != ChunkDemuxer::kOk)
523 return false;
524 if (AddId(video_id, HAS_VIDEO) != ChunkDemuxer::kOk)
525 return false;
527 int audio_flags = HAS_AUDIO;
528 int video_flags = HAS_VIDEO;
530 if (has_text) {
531 audio_flags |= HAS_TEXT;
532 video_flags |= HAS_TEXT;
535 AppendInitSegmentWithSourceId(audio_id, audio_flags);
536 AppendInitSegmentWithSourceId(video_id, video_flags);
537 return true;
540 bool InitDemuxerAudioAndVideoSources(const std::string& audio_id,
541 const std::string& video_id) {
542 return InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, false);
545 // Initializes the demuxer with data from 2 files with different
546 // decoder configurations. This is used to test the decoder config change
547 // logic.
549 // bear-320x240.webm VideoDecoderConfig returns 320x240 for its natural_size()
550 // bear-640x360.webm VideoDecoderConfig returns 640x360 for its natural_size()
551 // The resulting video stream returns data from each file for the following
552 // time ranges.
553 // bear-320x240.webm : [0-501) [801-2736)
554 // bear-640x360.webm : [527-793)
556 // bear-320x240.webm AudioDecoderConfig returns 3863 for its extra_data_size()
557 // bear-640x360.webm AudioDecoderConfig returns 3935 for its extra_data_size()
558 // The resulting audio stream returns data from each file for the following
559 // time ranges.
560 // bear-320x240.webm : [0-524) [779-2736)
561 // bear-640x360.webm : [527-759)
562 bool InitDemuxerWithConfigChangeData() {
563 scoped_refptr<DecoderBuffer> bear1 = ReadTestDataFile("bear-320x240.webm");
564 scoped_refptr<DecoderBuffer> bear2 = ReadTestDataFile("bear-640x360.webm");
566 EXPECT_CALL(*this, DemuxerOpened());
568 demuxer_->Initialize(
569 &host_, CreateInitDoneCB(base::TimeDelta::FromMilliseconds(2744),
570 PIPELINE_OK), true);
572 if (AddId(kSourceId, HAS_AUDIO | HAS_VIDEO) != ChunkDemuxer::kOk)
573 return false;
575 // Append the whole bear1 file.
576 // TODO(wolenetz/acolwell): Remove this extra SetDuration expectation once
577 // the files are fixed to have the correct duration in their init segments,
578 // and the CreateInitDoneCB() call, above, is fixed to used that duration.
579 // See http://crbug.com/354284.
580 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
581 AppendData(bear1->data(), bear1->data_size());
582 // Last audio frame has timestamp 2721 and duration 24 (estimated from max
583 // seen so far for audio track).
584 // Last video frame has timestamp 2703 and duration 33 (from TrackEntry
585 // DefaultDuration for video track).
586 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
588 // Append initialization segment for bear2.
589 // Note: Offsets here and below are derived from
590 // media/test/data/bear-640x360-manifest.js and
591 // media/test/data/bear-320x240-manifest.js which were
592 // generated from media/test/data/bear-640x360.webm and
593 // media/test/data/bear-320x240.webm respectively.
594 AppendData(bear2->data(), 4340);
596 // Append a media segment that goes from [0.527000, 1.014000).
597 AppendData(bear2->data() + 55290, 18785);
598 CheckExpectedRanges(kSourceId, "{ [0,1027) [1201,2736) }");
600 // Append initialization segment for bear1 & fill gap with [779-1197)
601 // segment.
602 AppendData(bear1->data(), 4370);
603 AppendData(bear1->data() + 72737, 28183);
604 CheckExpectedRanges(kSourceId, "{ [0,2736) }");
606 MarkEndOfStream(PIPELINE_OK);
607 return true;
610 void ShutdownDemuxer() {
611 if (demuxer_) {
612 demuxer_->Shutdown();
613 message_loop_.RunUntilIdle();
617 void AddSimpleBlock(ClusterBuilder* cb, int track_num, int64 timecode) {
618 uint8 data[] = { 0x00 };
619 cb->AddSimpleBlock(track_num, timecode, 0, data, sizeof(data));
622 scoped_ptr<Cluster> GenerateCluster(int timecode, int block_count) {
623 return GenerateCluster(timecode, timecode, block_count);
626 void AddVideoBlockGroup(ClusterBuilder* cb, int track_num, int64 timecode,
627 int duration, int flags) {
628 const uint8* data =
629 (flags & kWebMFlagKeyframe) != 0 ? kVP8Keyframe : kVP8Interframe;
630 int size = (flags & kWebMFlagKeyframe) != 0 ? sizeof(kVP8Keyframe) :
631 sizeof(kVP8Interframe);
632 cb->AddBlockGroup(track_num, timecode, duration, flags, data, size);
635 scoped_ptr<Cluster> GenerateCluster(int first_audio_timecode,
636 int first_video_timecode,
637 int block_count) {
638 CHECK_GT(block_count, 0);
640 int size = 10;
641 scoped_ptr<uint8[]> data(new uint8[size]);
643 ClusterBuilder cb;
644 cb.SetClusterTimecode(std::min(first_audio_timecode, first_video_timecode));
646 if (block_count == 1) {
647 cb.AddBlockGroup(kAudioTrackNum, first_audio_timecode,
648 kAudioBlockDuration, kWebMFlagKeyframe,
649 data.get(), size);
650 return cb.Finish();
653 int audio_timecode = first_audio_timecode;
654 int video_timecode = first_video_timecode;
656 // Create simple blocks for everything except the last 2 blocks.
657 // The first video frame must be a keyframe.
658 uint8 video_flag = kWebMFlagKeyframe;
659 for (int i = 0; i < block_count - 2; i++) {
660 if (audio_timecode <= video_timecode) {
661 cb.AddSimpleBlock(kAudioTrackNum, audio_timecode, kWebMFlagKeyframe,
662 data.get(), size);
663 audio_timecode += kAudioBlockDuration;
664 continue;
667 cb.AddSimpleBlock(kVideoTrackNum, video_timecode, video_flag, data.get(),
668 size);
669 video_timecode += kVideoBlockDuration;
670 video_flag = 0;
673 // Make the last 2 blocks BlockGroups so that they don't get delayed by the
674 // block duration calculation logic.
675 if (audio_timecode <= video_timecode) {
676 cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
677 kWebMFlagKeyframe, data.get(), size);
678 AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
679 kVideoBlockDuration, video_flag);
680 } else {
681 AddVideoBlockGroup(&cb, kVideoTrackNum, video_timecode,
682 kVideoBlockDuration, video_flag);
683 cb.AddBlockGroup(kAudioTrackNum, audio_timecode, kAudioBlockDuration,
684 kWebMFlagKeyframe, data.get(), size);
687 return cb.Finish();
690 scoped_ptr<Cluster> GenerateSingleStreamCluster(int timecode,
691 int end_timecode,
692 int track_number,
693 int block_duration) {
694 CHECK_GT(end_timecode, timecode);
696 std::vector<uint8> data(kBlockSize);
698 ClusterBuilder cb;
699 cb.SetClusterTimecode(timecode);
701 // Create simple blocks for everything except the last block.
702 while (timecode < (end_timecode - block_duration)) {
703 cb.AddSimpleBlock(track_number, timecode, kWebMFlagKeyframe,
704 &data[0], data.size());
705 timecode += block_duration;
708 if (track_number == kVideoTrackNum) {
709 AddVideoBlockGroup(&cb, track_number, timecode, block_duration,
710 kWebMFlagKeyframe);
711 } else {
712 cb.AddBlockGroup(track_number, timecode, block_duration,
713 kWebMFlagKeyframe, &data[0], data.size());
716 return cb.Finish();
719 void Read(DemuxerStream::Type type, const DemuxerStream::ReadCB& read_cb) {
720 demuxer_->GetStream(type)->Read(read_cb);
721 message_loop_.RunUntilIdle();
724 void ReadAudio(const DemuxerStream::ReadCB& read_cb) {
725 Read(DemuxerStream::AUDIO, read_cb);
728 void ReadVideo(const DemuxerStream::ReadCB& read_cb) {
729 Read(DemuxerStream::VIDEO, read_cb);
732 void GenerateExpectedReads(int timecode, int block_count) {
733 GenerateExpectedReads(timecode, timecode, block_count);
736 void GenerateExpectedReads(int start_audio_timecode,
737 int start_video_timecode,
738 int block_count) {
739 CHECK_GT(block_count, 0);
741 if (block_count == 1) {
742 ExpectRead(DemuxerStream::AUDIO, start_audio_timecode);
743 return;
746 int audio_timecode = start_audio_timecode;
747 int video_timecode = start_video_timecode;
749 for (int i = 0; i < block_count; i++) {
750 if (audio_timecode <= video_timecode) {
751 ExpectRead(DemuxerStream::AUDIO, audio_timecode);
752 audio_timecode += kAudioBlockDuration;
753 continue;
756 ExpectRead(DemuxerStream::VIDEO, video_timecode);
757 video_timecode += kVideoBlockDuration;
761 void GenerateSingleStreamExpectedReads(int timecode,
762 int block_count,
763 DemuxerStream::Type type,
764 int block_duration) {
765 CHECK_GT(block_count, 0);
766 int stream_timecode = timecode;
768 for (int i = 0; i < block_count; i++) {
769 ExpectRead(type, stream_timecode);
770 stream_timecode += block_duration;
774 void GenerateAudioStreamExpectedReads(int timecode, int block_count) {
775 GenerateSingleStreamExpectedReads(
776 timecode, block_count, DemuxerStream::AUDIO, kAudioBlockDuration);
779 void GenerateVideoStreamExpectedReads(int timecode, int block_count) {
780 GenerateSingleStreamExpectedReads(
781 timecode, block_count, DemuxerStream::VIDEO, kVideoBlockDuration);
784 scoped_ptr<Cluster> GenerateEmptyCluster(int timecode) {
785 ClusterBuilder cb;
786 cb.SetClusterTimecode(timecode);
787 return cb.Finish();
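// Verifies that the demuxer's buffered ranges match |expected|, which uses the
// form "{ [start_ms,end_ms) ... }", e.g. kDefaultFirstClusterRange
// ("{ [0,46) }") after kDefaultFirstCluster() has been appended.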
790 void CheckExpectedRanges(const std::string& expected) {
791 CheckExpectedRanges(kSourceId, expected);
794 void CheckExpectedRanges(const std::string& id,
795 const std::string& expected) {
796 Ranges<base::TimeDelta> r = demuxer_->GetBufferedRanges(id);
798 std::stringstream ss;
799 ss << "{ ";
800 for (size_t i = 0; i < r.size(); ++i) {
801 ss << "[" << r.start(i).InMilliseconds() << ","
802 << r.end(i).InMilliseconds() << ") ";
804 ss << "}";
805 EXPECT_EQ(expected, ss.str());
808 MOCK_METHOD2(ReadDone, void(DemuxerStream::Status status,
809 const scoped_refptr<DecoderBuffer>&));
811 void StoreStatusAndBuffer(DemuxerStream::Status* status_out,
812 scoped_refptr<DecoderBuffer>* buffer_out,
813 DemuxerStream::Status status,
814 const scoped_refptr<DecoderBuffer>& buffer) {
815 *status_out = status;
816 *buffer_out = buffer;
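// Reads from the |type| stream until a non-kOk status or an end-of-stream
// buffer is returned. On return, |*status| holds the final status and
// |*last_timestamp| the timestamp of the last kOk, non-end-of-stream buffer
// (kNoTimestamp() if none was read).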
819 void ReadUntilNotOkOrEndOfStream(DemuxerStream::Type type,
820 DemuxerStream::Status* status,
821 base::TimeDelta* last_timestamp) {
822 DemuxerStream* stream = demuxer_->GetStream(type);
823 scoped_refptr<DecoderBuffer> buffer;
825 *last_timestamp = kNoTimestamp();
826 do {
827 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
828 base::Unretained(this), status, &buffer));
829 base::MessageLoop::current()->RunUntilIdle();
830 if (*status == DemuxerStream::kOk && !buffer->end_of_stream())
831 *last_timestamp = buffer->timestamp();
832 } while (*status == DemuxerStream::kOk && !buffer->end_of_stream());
835 void ExpectEndOfStream(DemuxerStream::Type type) {
836 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk, IsEndOfStream()));
837 demuxer_->GetStream(type)->Read(base::Bind(
838 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
839 message_loop_.RunUntilIdle();
842 void ExpectRead(DemuxerStream::Type type, int64 timestamp_in_ms) {
843 EXPECT_CALL(*this, ReadDone(DemuxerStream::kOk,
844 HasTimestamp(timestamp_in_ms)));
845 demuxer_->GetStream(type)->Read(base::Bind(
846 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
847 message_loop_.RunUntilIdle();
850 void ExpectConfigChanged(DemuxerStream::Type type) {
851 EXPECT_CALL(*this, ReadDone(DemuxerStream::kConfigChanged, _));
852 demuxer_->GetStream(type)->Read(base::Bind(
853 &ChunkDemuxerTest::ReadDone, base::Unretained(this)));
854 message_loop_.RunUntilIdle();
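// Reads buffers from |stream| and verifies that their timestamps, in
// milliseconds, match the space-delimited |expected| string, e.g. "0 23 46"
// (illustrative values only).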
857 void CheckExpectedBuffers(DemuxerStream* stream,
858 const std::string& expected) {
859 std::vector<std::string> timestamps;
860 base::SplitString(expected, ' ', &timestamps);
861 std::stringstream ss;
862 for (size_t i = 0; i < timestamps.size(); ++i) {
863 DemuxerStream::Status status;
864 scoped_refptr<DecoderBuffer> buffer;
865 stream->Read(base::Bind(&ChunkDemuxerTest::StoreStatusAndBuffer,
866 base::Unretained(this), &status, &buffer));
867 base::MessageLoop::current()->RunUntilIdle();
868 if (status != DemuxerStream::kOk || buffer->end_of_stream())
869 break;
871 if (i > 0)
872 ss << " ";
873 ss << buffer->timestamp().InMilliseconds();
875 EXPECT_EQ(expected, ss.str());
878 MOCK_METHOD1(Checkpoint, void(int id));
880 struct BufferTimestamps {
881 int video_time_ms;
882 int audio_time_ms;
884 static const int kSkip = -1;
886 // Test parsing a WebM file.
887 // |filename| - The name of the file in media/test/data to parse.
888 // |timestamps| - The expected timestamps on the parsed buffers.
889 // A timestamp of kSkip indicates that a Read() call for that stream
890 // shouldn't be made on that iteration of the loop. If both streams have
891 // a kSkip then the loop will terminate.
892 bool ParseWebMFile(const std::string& filename,
893 const BufferTimestamps* timestamps,
894 const base::TimeDelta& duration) {
895 return ParseWebMFile(filename, timestamps, duration, HAS_AUDIO | HAS_VIDEO);
898 bool ParseWebMFile(const std::string& filename,
899 const BufferTimestamps* timestamps,
900 const base::TimeDelta& duration,
901 int stream_flags) {
902 EXPECT_CALL(*this, DemuxerOpened());
903 demuxer_->Initialize(
904 &host_, CreateInitDoneCB(duration, PIPELINE_OK), true);
906 if (AddId(kSourceId, stream_flags) != ChunkDemuxer::kOk)
907 return false;
909 // Read a WebM file into memory and send the data to the demuxer.
910 scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
911 AppendDataInPieces(buffer->data(), buffer->data_size(), 512);
913 // Verify that the timestamps on the first few packets match what we
914 // expect.
915 for (size_t i = 0;
916 (timestamps[i].audio_time_ms != kSkip ||
917 timestamps[i].video_time_ms != kSkip);
918 i++) {
919 bool audio_read_done = false;
920 bool video_read_done = false;
922 if (timestamps[i].audio_time_ms != kSkip) {
923 ReadAudio(base::Bind(&OnReadDone,
924 base::TimeDelta::FromMilliseconds(
925 timestamps[i].audio_time_ms),
926 &audio_read_done));
927 EXPECT_TRUE(audio_read_done);
930 if (timestamps[i].video_time_ms != kSkip) {
931 ReadVideo(base::Bind(&OnReadDone,
932 base::TimeDelta::FromMilliseconds(
933 timestamps[i].video_time_ms),
934 &video_read_done));
935 EXPECT_TRUE(video_read_done);
939 return true;
942 MOCK_METHOD0(DemuxerOpened, void());
943 // TODO(xhwang): This is a workaround for the issue that move-only parameters
944 // are not supported in mocked methods. Remove this when the issue is fixed
945 // (http://code.google.com/p/googletest/issues/detail?id=395) or when we use
946 // std::string instead of scoped_ptr<uint8[]> (http://crbug.com/130689).
947 MOCK_METHOD3(NeedKeyMock, void(const std::string& type,
948 const uint8* init_data, int init_data_size));
949 void DemuxerNeedKey(const std::string& type,
950 const std::vector<uint8>& init_data) {
951 const uint8* init_data_ptr = init_data.empty() ? NULL : &init_data[0];
952 NeedKeyMock(type, init_data_ptr, init_data.size());
955 void Seek(base::TimeDelta seek_time) {
956 demuxer_->StartWaitingForSeek(seek_time);
957 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
958 message_loop_.RunUntilIdle();
961 void MarkEndOfStream(PipelineStatus status) {
962 demuxer_->MarkEndOfStream(status);
963 message_loop_.RunUntilIdle();
966 bool SetTimestampOffset(const std::string& id,
967 base::TimeDelta timestamp_offset) {
968 if (demuxer_->IsParsingMediaSegment(id))
969 return false;
971 timestamp_offset_map_[id] = timestamp_offset;
972 return true;
975 base::MessageLoop message_loop_;
976 MockDemuxerHost host_;
978 scoped_ptr<ChunkDemuxer> demuxer_;
979 bool use_legacy_frame_processor_;
981 base::TimeDelta append_window_start_for_next_append_;
982 base::TimeDelta append_window_end_for_next_append_;
984 // Map of source id to timestamp offset to use for the next AppendData()
985 // operation for that source id.
986 std::map<std::string, base::TimeDelta> timestamp_offset_map_;
988 private:
989 DISALLOW_COPY_AND_ASSIGN(ChunkDemuxerTest);
992 TEST_P(ChunkDemuxerTest, Init) {
993 // Test no streams, audio-only, video-only, and audio & video scenarios.
994 // Audio and video streams can be encrypted or not encrypted.
995 for (int i = 0; i < 16; i++) {
996 bool has_audio = (i & 0x1) != 0;
997 bool has_video = (i & 0x2) != 0;
998 bool is_audio_encrypted = (i & 0x4) != 0;
999 bool is_video_encrypted = (i & 0x8) != 0;
1001 // No test on invalid combination.
1002 if ((!has_audio && is_audio_encrypted) ||
1003 (!has_video && is_video_encrypted)) {
1004 continue;
1007 CreateNewDemuxer();
1009 if (is_audio_encrypted || is_video_encrypted) {
1010 int need_key_count = (is_audio_encrypted ? 1 : 0) +
1011 (is_video_encrypted ? 1 : 0);
1012 EXPECT_CALL(*this, NeedKeyMock(kWebMEncryptInitDataType, NotNull(),
1013 DecryptConfig::kDecryptionKeySize))
1014 .Times(Exactly(need_key_count));
1017 int stream_flags = 0;
1018 if (has_audio)
1019 stream_flags |= HAS_AUDIO;
1021 if (has_video)
1022 stream_flags |= HAS_VIDEO;
1024 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1025 stream_flags, is_audio_encrypted, is_video_encrypted));
1027 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1028 if (has_audio) {
1029 ASSERT_TRUE(audio_stream);
1031 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1032 EXPECT_EQ(kCodecVorbis, config.codec());
1033 EXPECT_EQ(32, config.bits_per_channel());
1034 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1035 EXPECT_EQ(44100, config.samples_per_second());
1036 EXPECT_TRUE(config.extra_data());
1037 EXPECT_GT(config.extra_data_size(), 0u);
1038 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1039 EXPECT_EQ(is_audio_encrypted,
1040 audio_stream->audio_decoder_config().is_encrypted());
1041 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1042 ->supports_partial_append_window_trimming());
1043 } else {
1044 EXPECT_FALSE(audio_stream);
1047 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1048 if (has_video) {
1049 EXPECT_TRUE(video_stream);
1050 EXPECT_EQ(is_video_encrypted,
1051 video_stream->video_decoder_config().is_encrypted());
1052 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1053 ->supports_partial_append_window_trimming());
1054 } else {
1055 EXPECT_FALSE(video_stream);
1058 ShutdownDemuxer();
1059 demuxer_.reset();
1063 // TODO(acolwell): Fold this test into Init tests since the tests are
1064 // almost identical.
1065 TEST_P(ChunkDemuxerTest, InitText) {
1066 // Test with 1 video stream and 1 text stream, and 0 or 1 audio streams.
1067 // No encryption cases handled here.
1068 bool has_video = true;
1069 bool is_audio_encrypted = false;
1070 bool is_video_encrypted = false;
1071 for (int i = 0; i < 2; i++) {
1072 bool has_audio = (i & 0x1) != 0;
1074 CreateNewDemuxer();
1076 DemuxerStream* text_stream = NULL;
1077 TextTrackConfig text_config;
1078 EXPECT_CALL(host_, AddTextStream(_, _))
1079 .WillOnce(DoAll(SaveArg<0>(&text_stream),
1080 SaveArg<1>(&text_config)));
1082 int stream_flags = HAS_TEXT;
1083 if (has_audio)
1084 stream_flags |= HAS_AUDIO;
1086 if (has_video)
1087 stream_flags |= HAS_VIDEO;
1089 ASSERT_TRUE(InitDemuxerWithEncryptionInfo(
1090 stream_flags, is_audio_encrypted, is_video_encrypted));
1091 ASSERT_TRUE(text_stream);
1092 EXPECT_EQ(DemuxerStream::TEXT, text_stream->type());
1093 EXPECT_EQ(kTextSubtitles, text_config.kind());
1094 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(text_stream)
1095 ->supports_partial_append_window_trimming());
1097 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1098 if (has_audio) {
1099 ASSERT_TRUE(audio_stream);
1101 const AudioDecoderConfig& config = audio_stream->audio_decoder_config();
1102 EXPECT_EQ(kCodecVorbis, config.codec());
1103 EXPECT_EQ(32, config.bits_per_channel());
1104 EXPECT_EQ(CHANNEL_LAYOUT_STEREO, config.channel_layout());
1105 EXPECT_EQ(44100, config.samples_per_second());
1106 EXPECT_TRUE(config.extra_data());
1107 EXPECT_GT(config.extra_data_size(), 0u);
1108 EXPECT_EQ(kSampleFormatPlanarF32, config.sample_format());
1109 EXPECT_EQ(is_audio_encrypted,
1110 audio_stream->audio_decoder_config().is_encrypted());
1111 EXPECT_TRUE(static_cast<ChunkDemuxerStream*>(audio_stream)
1112 ->supports_partial_append_window_trimming());
1113 } else {
1114 EXPECT_FALSE(audio_stream);
1117 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1118 if (has_video) {
1119 EXPECT_TRUE(video_stream);
1120 EXPECT_EQ(is_video_encrypted,
1121 video_stream->video_decoder_config().is_encrypted());
1122 EXPECT_FALSE(static_cast<ChunkDemuxerStream*>(video_stream)
1123 ->supports_partial_append_window_trimming());
1124 } else {
1125 EXPECT_FALSE(video_stream);
1128 ShutdownDemuxer();
1129 demuxer_.reset();
1133 // Make sure that the demuxer reports an error if Shutdown()
1134 // is called before all the initialization segments are appended.
1135 TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppended) {
1136 EXPECT_CALL(*this, DemuxerOpened());
1137 demuxer_->Initialize(
1138 &host_, CreateInitDoneCB(
1139 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1141 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1142 EXPECT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
1144 AppendInitSegmentWithSourceId("audio", HAS_AUDIO);
1146 ShutdownDemuxer();
1149 TEST_P(ChunkDemuxerTest, Shutdown_BeforeAllInitSegmentsAppendedText) {
1150 EXPECT_CALL(*this, DemuxerOpened());
1151 demuxer_->Initialize(
1152 &host_, CreateInitDoneCB(
1153 kDefaultDuration(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1155 EXPECT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
1156 EXPECT_EQ(AddId("video_and_text", HAS_VIDEO), ChunkDemuxer::kOk);
1158 EXPECT_CALL(host_, AddTextStream(_, _))
1159 .Times(Exactly(1));
1161 AppendInitSegmentWithSourceId("video_and_text", HAS_VIDEO | HAS_TEXT);
1163 ShutdownDemuxer();
1166 // Verifies that all streams waiting for data receive an end of stream
1167 // buffer when Shutdown() is called.
1168 TEST_P(ChunkDemuxerTest, Shutdown_EndOfStreamWhileWaitingForData) {
1169 DemuxerStream* text_stream = NULL;
1170 EXPECT_CALL(host_, AddTextStream(_, _))
1171 .WillOnce(SaveArg<0>(&text_stream));
1172 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1174 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
1175 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
1177 bool audio_read_done = false;
1178 bool video_read_done = false;
1179 bool text_read_done = false;
1180 audio_stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1181 video_stream->Read(base::Bind(&OnReadDone_EOSExpected, &video_read_done));
1182 text_stream->Read(base::Bind(&OnReadDone_EOSExpected, &text_read_done));
1183 message_loop_.RunUntilIdle();
1185 EXPECT_FALSE(audio_read_done);
1186 EXPECT_FALSE(video_read_done);
1187 EXPECT_FALSE(text_read_done);
1189 ShutdownDemuxer();
1191 EXPECT_TRUE(audio_read_done);
1192 EXPECT_TRUE(video_read_done);
1193 EXPECT_TRUE(text_read_done);
1196 // Test that Seek() completes successfully when the first cluster
1197 // arrives.
1198 TEST_P(ChunkDemuxerTest, AppendDataAfterSeek) {
1199 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1200 AppendCluster(kDefaultFirstCluster());
1202 InSequence s;
1204 EXPECT_CALL(*this, Checkpoint(1));
1206 Seek(base::TimeDelta::FromMilliseconds(46));
1208 EXPECT_CALL(*this, Checkpoint(2));
1210 Checkpoint(1);
1212 AppendCluster(kDefaultSecondCluster());
1214 message_loop_.RunUntilIdle();
1216 Checkpoint(2);
1219 // Test that parsing errors are handled for clusters appended after init.
1220 TEST_P(ChunkDemuxerTest, ErrorWhileParsingClusterAfterInit) {
1221 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1222 AppendCluster(kDefaultFirstCluster());
1224 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1225 AppendGarbage();
1228 // Test the case where a Seek() is requested while the parser
1229 // is in the middle of a cluster. This is to verify that the parser
1230 // does not reset itself on a seek.
1231 TEST_P(ChunkDemuxerTest, SeekWhileParsingCluster) {
1232 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1234 InSequence s;
1236 scoped_ptr<Cluster> cluster_a(GenerateCluster(0, 6));
1238 // Split the cluster into two appends at an arbitrary point near the end.
1239 int first_append_size = cluster_a->size() - 11;
1240 int second_append_size = cluster_a->size() - first_append_size;
1242 // Append the first part of the cluster.
1243 AppendData(cluster_a->data(), first_append_size);
1245 ExpectRead(DemuxerStream::AUDIO, 0);
1246 ExpectRead(DemuxerStream::VIDEO, 0);
1247 ExpectRead(DemuxerStream::AUDIO, kAudioBlockDuration);
1249 Seek(base::TimeDelta::FromSeconds(5));
1251 // Append the rest of the cluster.
1252 AppendData(cluster_a->data() + first_append_size, second_append_size);
1254 // Append the new cluster and verify that only the blocks
1255 // in the new cluster are returned.
1256 AppendCluster(GenerateCluster(5000, 6));
1257 GenerateExpectedReads(5000, 6);
1260 // Test the case where AppendData() is called before Init().
1261 TEST_P(ChunkDemuxerTest, AppendDataBeforeInit) {
1262 scoped_ptr<uint8[]> info_tracks;
1263 int info_tracks_size = 0;
1264 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1265 false, false, &info_tracks, &info_tracks_size);
1266 demuxer_->AppendData(kSourceId, info_tracks.get(), info_tracks_size,
1267 append_window_start_for_next_append_,
1268 append_window_end_for_next_append_,
1269 &timestamp_offset_map_[kSourceId]);
1272 // Make sure Read() callbacks are dispatched with the proper data.
1273 TEST_P(ChunkDemuxerTest, Read) {
1274 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1276 AppendCluster(kDefaultFirstCluster());
1278 bool audio_read_done = false;
1279 bool video_read_done = false;
1280 ReadAudio(base::Bind(&OnReadDone,
1281 base::TimeDelta::FromMilliseconds(0),
1282 &audio_read_done));
1283 ReadVideo(base::Bind(&OnReadDone,
1284 base::TimeDelta::FromMilliseconds(0),
1285 &video_read_done));
1287 EXPECT_TRUE(audio_read_done);
1288 EXPECT_TRUE(video_read_done);
1291 TEST_P(ChunkDemuxerTest, OutOfOrderClusters) {
1292 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1293 AppendCluster(kDefaultFirstCluster());
1294 AppendCluster(GenerateCluster(10, 4));
1296 // Make sure that AppendCluster() does not fail with a cluster that
1297 // overlaps with the previously appended cluster.
1298 AppendCluster(GenerateCluster(5, 4));
1300 // Verify that AppendData() can still accept more data.
1301 scoped_ptr<Cluster> cluster_c(GenerateCluster(45, 2));
1302 demuxer_->AppendData(kSourceId, cluster_c->data(), cluster_c->size(),
1303 append_window_start_for_next_append_,
1304 append_window_end_for_next_append_,
1305 &timestamp_offset_map_[kSourceId]);
1308 TEST_P(ChunkDemuxerTest, NonMonotonicButAboveClusterTimecode) {
1309 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1310 AppendCluster(kDefaultFirstCluster());
1312 ClusterBuilder cb;
1314 // Test the case where block timecodes are not monotonically
1315 // increasing but stay above the cluster timecode.
1316 cb.SetClusterTimecode(5);
1317 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1318 AddSimpleBlock(&cb, kVideoTrackNum, 10);
1319 AddSimpleBlock(&cb, kAudioTrackNum, 7);
1320 AddSimpleBlock(&cb, kVideoTrackNum, 15);
1322 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1323 AppendCluster(cb.Finish());
1325 // Verify that AppendData() ignores data after the error.
1326 scoped_ptr<Cluster> cluster_b(GenerateCluster(20, 2));
1327 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1328 append_window_start_for_next_append_,
1329 append_window_end_for_next_append_,
1330 &timestamp_offset_map_[kSourceId]);
1333 TEST_P(ChunkDemuxerTest, BackwardsAndBeforeClusterTimecode) {
1334 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1335 AppendCluster(kDefaultFirstCluster());
1337 ClusterBuilder cb;
1339 // Test timecodes going backwards and including values less than the cluster
1340 // timecode.
1341 cb.SetClusterTimecode(5);
1342 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1343 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1344 AddSimpleBlock(&cb, kAudioTrackNum, 3);
1345 AddSimpleBlock(&cb, kVideoTrackNum, 3);
1347 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1348 AppendCluster(cb.Finish());
1350 // Verify that AppendData() ignores data after the error.
1351 scoped_ptr<Cluster> cluster_b(GenerateCluster(6, 2));
1352 demuxer_->AppendData(kSourceId, cluster_b->data(), cluster_b->size(),
1353 append_window_start_for_next_append_,
1354 append_window_end_for_next_append_,
1355 &timestamp_offset_map_[kSourceId]);
1359 TEST_P(ChunkDemuxerTest, PerStreamMonotonicallyIncreasingTimestamps) {
1360 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1361 AppendCluster(kDefaultFirstCluster());
1363 ClusterBuilder cb;
1365 // Test that timestamps must be monotonically increasing on a per-stream
1366 // basis.
1367 cb.SetClusterTimecode(5);
1368 AddSimpleBlock(&cb, kAudioTrackNum, 5);
1369 AddSimpleBlock(&cb, kVideoTrackNum, 5);
1370 AddSimpleBlock(&cb, kAudioTrackNum, 4);
1371 AddSimpleBlock(&cb, kVideoTrackNum, 7);
1373 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1374 AppendCluster(cb.Finish());
1377 // Test the case where a cluster is passed to AppendCluster() before
1378 // INFO & TRACKS data.
1379 TEST_P(ChunkDemuxerTest, ClusterBeforeInitSegment) {
1380 EXPECT_CALL(*this, DemuxerOpened());
1381 demuxer_->Initialize(
1382 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1384 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1386 AppendCluster(GenerateCluster(0, 1));
1389 // Test cases where we get a MarkEndOfStream() call during initialization.
1390 TEST_P(ChunkDemuxerTest, EOSDuringInit) {
1391 EXPECT_CALL(*this, DemuxerOpened());
1392 demuxer_->Initialize(
1393 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1394 MarkEndOfStream(PIPELINE_OK);
1397 TEST_P(ChunkDemuxerTest, EndOfStreamWithNoAppend) {
1398 EXPECT_CALL(*this, DemuxerOpened());
1399 demuxer_->Initialize(
1400 &host_, NewExpectedStatusCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
1402 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1404 CheckExpectedRanges("{ }");
1405 MarkEndOfStream(PIPELINE_OK);
1406 ShutdownDemuxer();
1407 CheckExpectedRanges("{ }");
1408 demuxer_->RemoveId(kSourceId);
1409 demuxer_.reset();
1412 TEST_P(ChunkDemuxerTest, EndOfStreamWithNoMediaAppend) {
1413 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1415 CheckExpectedRanges("{ }");
1416 MarkEndOfStream(PIPELINE_OK);
1417 CheckExpectedRanges("{ }");
1420 TEST_P(ChunkDemuxerTest, DecodeErrorEndOfStream) {
1421 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1423 AppendCluster(kDefaultFirstCluster());
1424 CheckExpectedRanges(kDefaultFirstClusterRange);
1426 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
1427 MarkEndOfStream(PIPELINE_ERROR_DECODE);
1428 CheckExpectedRanges(kDefaultFirstClusterRange);
1431 TEST_P(ChunkDemuxerTest, NetworkErrorEndOfStream) {
1432 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1434 AppendCluster(kDefaultFirstCluster());
1435 CheckExpectedRanges(kDefaultFirstClusterRange);
1437 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_NETWORK));
1438 MarkEndOfStream(PIPELINE_ERROR_NETWORK);
1441 // Helper class to reduce duplicate code when testing end of stream
1442 // Read() behavior.
1443 class EndOfStreamHelper {
1444 public:
1445 explicit EndOfStreamHelper(Demuxer* demuxer)
1446 : demuxer_(demuxer),
1447 audio_read_done_(false),
1448 video_read_done_(false) {
1451 // Request a read on the audio and video streams.
1452 void RequestReads() {
1453 EXPECT_FALSE(audio_read_done_);
1454 EXPECT_FALSE(video_read_done_);
1456 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
1457 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
1459 audio->Read(base::Bind(&OnEndOfStreamReadDone, &audio_read_done_));
1460 video->Read(base::Bind(&OnEndOfStreamReadDone, &video_read_done_));
1461 base::MessageLoop::current()->RunUntilIdle();
1464 // Check to see if |audio_read_done_| and |video_read_done_| variables
1465 // match |expected|.
1466 void CheckIfReadDonesWereCalled(bool expected) {
1467 base::MessageLoop::current()->RunUntilIdle();
1468 EXPECT_EQ(expected, audio_read_done_);
1469 EXPECT_EQ(expected, video_read_done_);
1472 private:
1473 static void OnEndOfStreamReadDone(
1474 bool* called,
1475 DemuxerStream::Status status,
1476 const scoped_refptr<DecoderBuffer>& buffer) {
1477 EXPECT_EQ(status, DemuxerStream::kOk);
1478 EXPECT_TRUE(buffer->end_of_stream());
1479 *called = true;
1482 Demuxer* demuxer_;
1483 bool audio_read_done_;
1484 bool video_read_done_;
1486 DISALLOW_COPY_AND_ASSIGN(EndOfStreamHelper);
1489 // Make sure that all pending reads that we don't have media data for get an
1490 // "end of stream" buffer when MarkEndOfStream() is called.
1491 TEST_P(ChunkDemuxerTest, EndOfStreamWithPendingReads) {
1492 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1494 AppendCluster(GenerateCluster(0, 2));
1496 bool audio_read_done_1 = false;
1497 bool video_read_done_1 = false;
1498 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1499 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1501 ReadAudio(base::Bind(&OnReadDone,
1502 base::TimeDelta::FromMilliseconds(0),
1503 &audio_read_done_1));
1504 ReadVideo(base::Bind(&OnReadDone,
1505 base::TimeDelta::FromMilliseconds(0),
1506 &video_read_done_1));
1507 message_loop_.RunUntilIdle();
1509 EXPECT_TRUE(audio_read_done_1);
1510 EXPECT_TRUE(video_read_done_1);
1512 end_of_stream_helper_1.RequestReads();
1514 EXPECT_CALL(host_, SetDuration(
1515 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1516 MarkEndOfStream(PIPELINE_OK);
1518 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1520 end_of_stream_helper_2.RequestReads();
1521 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1524 // Make sure that all Read() calls after we get a MarkEndOfStream()
1525 // call return an "end of stream" buffer.
1526 TEST_P(ChunkDemuxerTest, ReadsAfterEndOfStream) {
1527 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1529 AppendCluster(GenerateCluster(0, 2));
1531 bool audio_read_done_1 = false;
1532 bool video_read_done_1 = false;
1533 EndOfStreamHelper end_of_stream_helper_1(demuxer_.get());
1534 EndOfStreamHelper end_of_stream_helper_2(demuxer_.get());
1535 EndOfStreamHelper end_of_stream_helper_3(demuxer_.get());
1537 ReadAudio(base::Bind(&OnReadDone,
1538 base::TimeDelta::FromMilliseconds(0),
1539 &audio_read_done_1));
1540 ReadVideo(base::Bind(&OnReadDone,
1541 base::TimeDelta::FromMilliseconds(0),
1542 &video_read_done_1));
1544 end_of_stream_helper_1.RequestReads();
1546 EXPECT_TRUE(audio_read_done_1);
1547 EXPECT_TRUE(video_read_done_1);
1548 end_of_stream_helper_1.CheckIfReadDonesWereCalled(false);
1550 EXPECT_CALL(host_, SetDuration(
1551 base::TimeDelta::FromMilliseconds(kVideoBlockDuration)));
1552 MarkEndOfStream(PIPELINE_OK);
1554 end_of_stream_helper_1.CheckIfReadDonesWereCalled(true);
1556 // Request a few more reads and make sure we immediately get
1557 // end of stream buffers.
1558 end_of_stream_helper_2.RequestReads();
1559 end_of_stream_helper_2.CheckIfReadDonesWereCalled(true);
1561 end_of_stream_helper_3.RequestReads();
1562 end_of_stream_helper_3.CheckIfReadDonesWereCalled(true);
1565 TEST_P(ChunkDemuxerTest, EndOfStreamDuringCanceledSeek) {
1566 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1568 AppendCluster(0, 10);
1569 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(138)));
1570 MarkEndOfStream(PIPELINE_OK);
1572 // Start the first seek.
1573 Seek(base::TimeDelta::FromMilliseconds(20));
1575 // Simulate another seek being requested before the first
1576 // seek has finished prerolling.
1577 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(30);
1578 demuxer_->CancelPendingSeek(seek_time2);
1580 // Finish second seek.
1581 Seek(seek_time2);
1583 DemuxerStream::Status status;
1584 base::TimeDelta last_timestamp;
1586 // Make sure audio can reach end of stream.
1587 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
1588 ASSERT_EQ(status, DemuxerStream::kOk);
1590 // Make sure video can reach end of stream.
1591 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
1592 ASSERT_EQ(status, DemuxerStream::kOk);
1595 // Verify buffered range change behavior for audio/video/text tracks.
1596 TEST_P(ChunkDemuxerTest, EndOfStreamRangeChanges) {
1597 DemuxerStream* text_stream = NULL;
1599 EXPECT_CALL(host_, AddTextStream(_, _))
1600 .WillOnce(SaveArg<0>(&text_stream));
1601 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
1603 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
1604 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
1606 // Check expected ranges and verify that an empty text track does not
1607 // affect the expected ranges.
1608 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1610 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
1611 MarkEndOfStream(PIPELINE_OK);
1613 // Check expected ranges and verify that an empty text track does not
1614 // affect the expected ranges.
1615 CheckExpectedRanges(kSourceId, "{ [0,66) }");
1617 // Unmark end of stream state and verify that the ranges return to
1618 // their pre-"end of stream" values.
1619 demuxer_->UnmarkEndOfStream();
1620 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1622 // Add text track data and verify that the buffered ranges don't change
1623 // since the intersection of all the tracks doesn't change.
1624 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(200)));
1625 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
1626 CheckExpectedRanges(kSourceId, "{ [0,46) }");
1628 // Mark end of stream and verify that text track data is reflected in
1629 // the new range.
1630 MarkEndOfStream(PIPELINE_OK);
1631 CheckExpectedRanges(kSourceId, "{ [0,200) }");
1634 // Make sure AppendData() will accept elements that span multiple calls.
1635 TEST_P(ChunkDemuxerTest, AppendingInPieces) {
1636 EXPECT_CALL(*this, DemuxerOpened());
1637 demuxer_->Initialize(
1638 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1640 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
1642 scoped_ptr<uint8[]> info_tracks;
1643 int info_tracks_size = 0;
1644 CreateInitSegment(HAS_AUDIO | HAS_VIDEO,
1645 false, false, &info_tracks, &info_tracks_size);
1647 scoped_ptr<Cluster> cluster_a(kDefaultFirstCluster());
1648 scoped_ptr<Cluster> cluster_b(kDefaultSecondCluster());
1650 size_t buffer_size = info_tracks_size + cluster_a->size() + cluster_b->size();
1651 scoped_ptr<uint8[]> buffer(new uint8[buffer_size]);
1652 uint8* dst = buffer.get();
1653 memcpy(dst, info_tracks.get(), info_tracks_size);
1654 dst += info_tracks_size;
1656 memcpy(dst, cluster_a->data(), cluster_a->size());
1657 dst += cluster_a->size();
1659 memcpy(dst, cluster_b->data(), cluster_b->size());
1660 dst += cluster_b->size();
1662 AppendDataInPieces(buffer.get(), buffer_size);
1664 GenerateExpectedReads(0, 9);
1667 TEST_P(ChunkDemuxerTest, WebMFile_AudioAndVideo) {
1668 struct BufferTimestamps buffer_timestamps[] = {
1669 {0, 0},
1670 {33, 3},
1671 {67, 6},
1672 {100, 9},
1673 {133, 12},
1674 {kSkip, kSkip},
1677 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1678 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1679 // have the correct duration in the init segment. See http://crbug.com/354284.
1680 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1682 ASSERT_TRUE(ParseWebMFile("bear-320x240.webm", buffer_timestamps,
1683 base::TimeDelta::FromMilliseconds(2744)));
1686 TEST_P(ChunkDemuxerTest, WebMFile_LiveAudioAndVideo) {
1687 struct BufferTimestamps buffer_timestamps[] = {
1688 {0, 0},
1689 {33, 3},
1690 {67, 6},
1691 {100, 9},
1692 {133, 12},
1693 {kSkip, kSkip},
1696 ASSERT_TRUE(ParseWebMFile("bear-320x240-live.webm", buffer_timestamps,
1697 kInfiniteDuration()));
1700 TEST_P(ChunkDemuxerTest, WebMFile_AudioOnly) {
1701 struct BufferTimestamps buffer_timestamps[] = {
1702 {kSkip, 0},
1703 {kSkip, 3},
1704 {kSkip, 6},
1705 {kSkip, 9},
1706 {kSkip, 12},
1707 {kSkip, kSkip},
1710 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1711 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1712 // have the correct duration in the init segment. See http://crbug.com/354284.
1713 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2746)));
1715 ASSERT_TRUE(ParseWebMFile("bear-320x240-audio-only.webm", buffer_timestamps,
1716 base::TimeDelta::FromMilliseconds(2744),
1717 HAS_AUDIO));
1720 TEST_P(ChunkDemuxerTest, WebMFile_VideoOnly) {
1721 struct BufferTimestamps buffer_timestamps[] = {
1722 {0, kSkip},
1723 {33, kSkip},
1724 {67, kSkip},
1725 {100, kSkip},
1726 {133, kSkip},
1727 {kSkip, kSkip},
1730 // TODO(wolenetz/acolwell): Remove this SetDuration expectation and update the
1731 // ParseWebMFile() call's expected duration, below, once the file is fixed to
1732 // have the correct duration in the init segment. See http://crbug.com/354284.
1733 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(2736)));
1735 ASSERT_TRUE(ParseWebMFile("bear-320x240-video-only.webm", buffer_timestamps,
1736 base::TimeDelta::FromMilliseconds(2703),
1737 HAS_VIDEO));
1740 TEST_P(ChunkDemuxerTest, WebMFile_AltRefFrames) {
1741 struct BufferTimestamps buffer_timestamps[] = {
1742 {0, 0},
1743 {33, 3},
1744 {33, 6},
1745 {67, 9},
1746 {100, 12},
1747 {kSkip, kSkip},
1750 ASSERT_TRUE(ParseWebMFile("bear-320x240-altref.webm", buffer_timestamps,
1751 base::TimeDelta::FromMilliseconds(2767)));
1754 // Verify that we output buffers before the entire cluster has been parsed.
1755 TEST_P(ChunkDemuxerTest, IncrementalClusterParsing) {
1756 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1757 AppendEmptyCluster(0);
1759 scoped_ptr<Cluster> cluster(GenerateCluster(0, 6));
1761 bool audio_read_done = false;
1762 bool video_read_done = false;
1763 ReadAudio(base::Bind(&OnReadDone,
1764 base::TimeDelta::FromMilliseconds(0),
1765 &audio_read_done));
1766 ReadVideo(base::Bind(&OnReadDone,
1767 base::TimeDelta::FromMilliseconds(0),
1768 &video_read_done));
1770 // Make sure the reads haven't completed yet.
1771 EXPECT_FALSE(audio_read_done);
1772 EXPECT_FALSE(video_read_done);
1774 // Append data one byte at a time until one or both reads complete.
1775 int i = 0;
1776 for (; i < cluster->size() && !(audio_read_done || video_read_done); ++i) {
1777 AppendData(cluster->data() + i, 1);
1778 message_loop_.RunUntilIdle();
1781 EXPECT_TRUE(audio_read_done || video_read_done);
1782 EXPECT_GT(i, 0);
1783 EXPECT_LT(i, cluster->size());
1785 audio_read_done = false;
1786 video_read_done = false;
1787 ReadAudio(base::Bind(&OnReadDone,
1788 base::TimeDelta::FromMilliseconds(23),
1789 &audio_read_done));
1790 ReadVideo(base::Bind(&OnReadDone,
1791 base::TimeDelta::FromMilliseconds(33),
1792 &video_read_done));
1794 // Make sure the reads haven't completed yet.
1795 EXPECT_FALSE(audio_read_done);
1796 EXPECT_FALSE(video_read_done);
1798 // Append the remaining data.
1799 ASSERT_LT(i, cluster->size());
1800 AppendData(cluster->data() + i, cluster->size() - i);
1802 message_loop_.RunUntilIdle();
1804 EXPECT_TRUE(audio_read_done);
1805 EXPECT_TRUE(video_read_done);
1808 TEST_P(ChunkDemuxerTest, ParseErrorDuringInit) {
1809 EXPECT_CALL(*this, DemuxerOpened());
1810 demuxer_->Initialize(
1811 &host_, CreateInitDoneCB(
1812 kNoTimestamp(), DEMUXER_ERROR_COULD_NOT_OPEN), true);
1814 ASSERT_EQ(AddId(), ChunkDemuxer::kOk);
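  // Appending a single byte of garbage triggers a parse error during init,
  // which surfaces as the DEMUXER_ERROR_COULD_NOT_OPEN expected above.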
1816 uint8 tmp = 0;
1817 demuxer_->AppendData(kSourceId, &tmp, 1,
1818 append_window_start_for_next_append_,
1819 append_window_end_for_next_append_,
1820 &timestamp_offset_map_[kSourceId]);
1823 TEST_P(ChunkDemuxerTest, AVHeadersWithAudioOnlyType) {
1824 EXPECT_CALL(*this, DemuxerOpened());
1825 demuxer_->Initialize(
1826 &host_, CreateInitDoneCB(kNoTimestamp(),
1827 DEMUXER_ERROR_COULD_NOT_OPEN), true);
1829 std::vector<std::string> codecs(1);
1830 codecs[0] = "vorbis";
1831 ASSERT_EQ(demuxer_->AddId(kSourceId, "audio/webm", codecs,
1832 use_legacy_frame_processor_),
1833 ChunkDemuxer::kOk);
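  // This id was declared audio-only ("audio/webm" with vorbis), so an init
  // segment that also contains video should fail with the
  // DEMUXER_ERROR_COULD_NOT_OPEN expected above.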
1835 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1838 TEST_P(ChunkDemuxerTest, AVHeadersWithVideoOnlyType) {
1839 EXPECT_CALL(*this, DemuxerOpened());
1840 demuxer_->Initialize(
1841 &host_, CreateInitDoneCB(kNoTimestamp(),
1842 DEMUXER_ERROR_COULD_NOT_OPEN), true);
1844 std::vector<std::string> codecs(1);
1845 codecs[0] = "vp8";
1846 ASSERT_EQ(demuxer_->AddId(kSourceId, "video/webm", codecs,
1847 use_legacy_frame_processor_),
1848 ChunkDemuxer::kOk);
1850 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1853 TEST_P(ChunkDemuxerTest, MultipleHeaders) {
1854 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1856 AppendCluster(kDefaultFirstCluster());
1858 // Append another identical initialization segment.
1859 AppendInitSegment(HAS_AUDIO | HAS_VIDEO);
1861 AppendCluster(kDefaultSecondCluster());
1863 GenerateExpectedReads(0, 9);
1866 TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideo) {
1867 std::string audio_id = "audio1";
1868 std::string video_id = "video1";
1869 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1871 // Append audio and video data into separate source ids.
1872 AppendCluster(audio_id,
1873 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1874 GenerateAudioStreamExpectedReads(0, 4);
1875 AppendCluster(video_id,
1876 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1877 GenerateVideoStreamExpectedReads(0, 4);
1880 TEST_P(ChunkDemuxerTest, AddSeparateSourcesForAudioAndVideoText) {
1881 // TODO(matthewjheaney): Here and elsewhere, we need more tests
1882   // for inband text tracks (http://crbug.com/321455).
1884 std::string audio_id = "audio1";
1885 std::string video_id = "video1";
1887 EXPECT_CALL(host_, AddTextStream(_, _))
1888 .Times(Exactly(2));
1889 ASSERT_TRUE(InitDemuxerAudioAndVideoSourcesText(audio_id, video_id, true));
1891 // Append audio and video data into separate source ids.
1892 AppendCluster(audio_id,
1893 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1894 GenerateAudioStreamExpectedReads(0, 4);
1895 AppendCluster(video_id,
1896 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1897 GenerateVideoStreamExpectedReads(0, 4);
1900 TEST_P(ChunkDemuxerTest, AddIdFailures) {
1901 EXPECT_CALL(*this, DemuxerOpened());
1902 demuxer_->Initialize(
1903 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
1905 std::string audio_id = "audio1";
1906 std::string video_id = "video1";
1908 ASSERT_EQ(AddId(audio_id, HAS_AUDIO), ChunkDemuxer::kOk);
1910 // Adding an id with audio/video should fail because we already added audio.
1911 ASSERT_EQ(AddId(), ChunkDemuxer::kReachedIdLimit);
1913 AppendInitSegmentWithSourceId(audio_id, HAS_AUDIO);
1915 // Adding an id after append should fail.
1916 ASSERT_EQ(AddId(video_id, HAS_VIDEO), ChunkDemuxer::kReachedIdLimit);
1919 // Test that Read() calls after a RemoveId() return "end of stream" buffers.
1920 TEST_P(ChunkDemuxerTest, RemoveId) {
1921 std::string audio_id = "audio1";
1922 std::string video_id = "video1";
1923 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
1925 // Append audio and video data into separate source ids.
1926 AppendCluster(audio_id,
1927 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
1928 AppendCluster(video_id,
1929 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
1931 // Read() from audio should return normal buffers.
1932 GenerateAudioStreamExpectedReads(0, 4);
1934 // Remove the audio id.
1935 demuxer_->RemoveId(audio_id);
1937 // Read() from audio should return "end of stream" buffers.
1938 bool audio_read_done = false;
1939 ReadAudio(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
1940 message_loop_.RunUntilIdle();
1941 EXPECT_TRUE(audio_read_done);
1943 // Read() from video should still return normal buffers.
1944 GenerateVideoStreamExpectedReads(0, 4);
1947 // Test that removing an ID immediately after adding it does not interfere with
1948 // quota for new IDs in the future.
1949 TEST_P(ChunkDemuxerTest, RemoveAndAddId) {
1950 std::string audio_id_1 = "audio1";
1951 ASSERT_TRUE(AddId(audio_id_1, HAS_AUDIO) == ChunkDemuxer::kOk);
1952 demuxer_->RemoveId(audio_id_1);
1954 std::string audio_id_2 = "audio2";
1955 ASSERT_TRUE(AddId(audio_id_2, HAS_AUDIO) == ChunkDemuxer::kOk);
1958 TEST_P(ChunkDemuxerTest, SeekCanceled) {
1959 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1961 // Append cluster at the beginning of the stream.
1962 AppendCluster(GenerateCluster(0, 4));
1964 // Seek to an unbuffered region.
1965 Seek(base::TimeDelta::FromSeconds(50));
1967 // Attempt to read in unbuffered area; should not fulfill the read.
1968 bool audio_read_done = false;
1969 bool video_read_done = false;
1970 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
1971 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
1972 EXPECT_FALSE(audio_read_done);
1973 EXPECT_FALSE(video_read_done);
1975 // Now cancel the pending seek, which should flush the reads with empty
1976 // buffers.
1977 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
1978 demuxer_->CancelPendingSeek(seek_time);
1979 message_loop_.RunUntilIdle();
1980 EXPECT_TRUE(audio_read_done);
1981 EXPECT_TRUE(video_read_done);
1983 // A seek back to the buffered region should succeed.
1984 Seek(seek_time);
1985 GenerateExpectedReads(0, 4);
1988 TEST_P(ChunkDemuxerTest, SeekCanceledWhileWaitingForSeek) {
1989 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
1991 // Append cluster at the beginning of the stream.
1992 AppendCluster(GenerateCluster(0, 4));
1994 // Start waiting for a seek.
1995 base::TimeDelta seek_time1 = base::TimeDelta::FromSeconds(50);
1996 base::TimeDelta seek_time2 = base::TimeDelta::FromSeconds(0);
1997 demuxer_->StartWaitingForSeek(seek_time1);
1999 // Now cancel the upcoming seek to an unbuffered region.
2000 demuxer_->CancelPendingSeek(seek_time2);
2001 demuxer_->Seek(seek_time1, NewExpectedStatusCB(PIPELINE_OK));
2003 // Read requests should be fulfilled with empty buffers.
2004 bool audio_read_done = false;
2005 bool video_read_done = false;
2006 ReadAudio(base::Bind(&OnReadDone_AbortExpected, &audio_read_done));
2007 ReadVideo(base::Bind(&OnReadDone_AbortExpected, &video_read_done));
2008 EXPECT_TRUE(audio_read_done);
2009 EXPECT_TRUE(video_read_done);
2011 // A seek back to the buffered region should succeed.
2012 Seek(seek_time2);
2013 GenerateExpectedReads(0, 4);
2016 // Test that Seek() successfully seeks to all source IDs.
2017 TEST_P(ChunkDemuxerTest, SeekAudioAndVideoSources) {
2018 std::string audio_id = "audio1";
2019 std::string video_id = "video1";
2020 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2022 AppendCluster(
2023 audio_id,
2024 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2025 AppendCluster(
2026 video_id,
2027 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2029 // Read() should return buffers at 0.
2030 bool audio_read_done = false;
2031 bool video_read_done = false;
2032 ReadAudio(base::Bind(&OnReadDone,
2033 base::TimeDelta::FromMilliseconds(0),
2034 &audio_read_done));
2035 ReadVideo(base::Bind(&OnReadDone,
2036 base::TimeDelta::FromMilliseconds(0),
2037 &video_read_done));
2038 EXPECT_TRUE(audio_read_done);
2039 EXPECT_TRUE(video_read_done);
2041 // Seek to 3 (an unbuffered region).
2042 Seek(base::TimeDelta::FromSeconds(3));
2044 audio_read_done = false;
2045 video_read_done = false;
2046 ReadAudio(base::Bind(&OnReadDone,
2047 base::TimeDelta::FromSeconds(3),
2048 &audio_read_done));
2049 ReadVideo(base::Bind(&OnReadDone,
2050 base::TimeDelta::FromSeconds(3),
2051 &video_read_done));
2052 // Read()s should not return until after data is appended at the Seek point.
2053 EXPECT_FALSE(audio_read_done);
2054 EXPECT_FALSE(video_read_done);
2056 AppendCluster(audio_id,
2057 GenerateSingleStreamCluster(
2058 3000, 3092, kAudioTrackNum, kAudioBlockDuration));
2059 AppendCluster(video_id,
2060 GenerateSingleStreamCluster(
2061 3000, 3132, kVideoTrackNum, kVideoBlockDuration));
2063 message_loop_.RunUntilIdle();
2065 // Read() should return buffers at 3.
2066 EXPECT_TRUE(audio_read_done);
2067 EXPECT_TRUE(video_read_done);
2070 // Test that Seek() completes successfully when EndOfStream
2071 // is called before data is available for that seek point.
2072 // This scenario can occur when seeking past the end of stream of either
2073 // audio or video (or both).
2074 TEST_P(ChunkDemuxerTest, EndOfStreamAfterPastEosSeek) {
2075 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2077 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2078 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2080 // Seeking past the end of video.
2081 // Note: audio data is available for that seek point.
2082 bool seek_cb_was_called = false;
2083 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(110);
2084 demuxer_->StartWaitingForSeek(seek_time);
2085 demuxer_->Seek(seek_time,
2086 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2087 message_loop_.RunUntilIdle();
2089 EXPECT_FALSE(seek_cb_was_called);
2091 EXPECT_CALL(host_, SetDuration(
2092 base::TimeDelta::FromMilliseconds(120)));
2093 MarkEndOfStream(PIPELINE_OK);
2094 message_loop_.RunUntilIdle();
2096 EXPECT_TRUE(seek_cb_was_called);
2098 ShutdownDemuxer();
2101 // Test that EndOfStream is ignored if it arrives during a pending seek
2102 // whose seek time is before some existing ranges.
2103 TEST_P(ChunkDemuxerTest, EndOfStreamDuringPendingSeek) {
2104 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2106 AppendCluster(GenerateSingleStreamCluster(0, 120, kAudioTrackNum, 10));
2107 AppendCluster(GenerateSingleStreamCluster(0, 100, kVideoTrackNum, 5));
2108 AppendCluster(GenerateSingleStreamCluster(200, 300, kAudioTrackNum, 10));
2109 AppendCluster(GenerateSingleStreamCluster(200, 300, kVideoTrackNum, 5));
2111 bool seek_cb_was_called = false;
2112 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(160);
2113 demuxer_->StartWaitingForSeek(seek_time);
2114 demuxer_->Seek(seek_time,
2115 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
2116 message_loop_.RunUntilIdle();
2118 EXPECT_FALSE(seek_cb_was_called);
2120 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(300)));
2121 MarkEndOfStream(PIPELINE_OK);
2122 message_loop_.RunUntilIdle();
2124 EXPECT_FALSE(seek_cb_was_called);
2126 demuxer_->UnmarkEndOfStream();
2128 AppendCluster(GenerateSingleStreamCluster(140, 180, kAudioTrackNum, 10));
2129 AppendCluster(GenerateSingleStreamCluster(140, 180, kVideoTrackNum, 5));
2131 message_loop_.RunUntilIdle();
2133 EXPECT_TRUE(seek_cb_was_called);
2135 ShutdownDemuxer();
2138 // Test ranges in an audio-only stream.
2139 TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioIdOnly) {
2140 EXPECT_CALL(*this, DemuxerOpened());
2141 demuxer_->Initialize(
2142 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2144 ASSERT_EQ(AddId(kSourceId, HAS_AUDIO), ChunkDemuxer::kOk);
2145 AppendInitSegment(HAS_AUDIO);
2147 // Test a simple cluster.
2148 AppendCluster(
2149 GenerateSingleStreamCluster(0, 92, kAudioTrackNum, kAudioBlockDuration));
2151 CheckExpectedRanges("{ [0,92) }");
2153 // Append a disjoint cluster to check for two separate ranges.
2154 AppendCluster(GenerateSingleStreamCluster(
2155 150, 219, kAudioTrackNum, kAudioBlockDuration));
2157 CheckExpectedRanges("{ [0,92) [150,219) }");
2160 // Test ranges in a video-only stream.
2161 TEST_P(ChunkDemuxerTest, GetBufferedRanges_VideoIdOnly) {
2162 EXPECT_CALL(*this, DemuxerOpened());
2163 demuxer_->Initialize(
2164 &host_, CreateInitDoneCB(kDefaultDuration(), PIPELINE_OK), true);
2166 ASSERT_EQ(AddId(kSourceId, HAS_VIDEO), ChunkDemuxer::kOk);
2167 AppendInitSegment(HAS_VIDEO);
2169 // Test a simple cluster.
2170 AppendCluster(
2171 GenerateSingleStreamCluster(0, 132, kVideoTrackNum, kVideoBlockDuration));
2173 CheckExpectedRanges("{ [0,132) }");
2175 // Append a disjoint cluster to check for two separate ranges.
2176 AppendCluster(GenerateSingleStreamCluster(
2177 200, 299, kVideoTrackNum, kVideoBlockDuration));
2179 CheckExpectedRanges("{ [0,132) [200,299) }");
2182 TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideo) {
2183 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2185 // Audio: 0 -> 23
2186 // Video: 0 -> 33
2187 // Buffered Range: 0 -> 23
2188 // Audio block duration is smaller than video block duration,
2189 // so the buffered ranges should correspond to the audio blocks.
2190 AppendCluster(GenerateSingleStreamCluster(
2191 0, kAudioBlockDuration, kAudioTrackNum, kAudioBlockDuration));
2192 AppendCluster(GenerateSingleStreamCluster(
2193 0, kVideoBlockDuration, kVideoTrackNum, kVideoBlockDuration));
2195 CheckExpectedRanges("{ [0,23) }");
2197 // Audio: 300 -> 400
2198 // Video: 320 -> 420
2199 // Buffered Range: 320 -> 400 (end overlap)
2200 AppendCluster(GenerateSingleStreamCluster(300, 400, kAudioTrackNum, 50));
2201 AppendCluster(GenerateSingleStreamCluster(320, 420, kVideoTrackNum, 50));
2203 CheckExpectedRanges("{ [0,23) [320,400) }");
2205 // Audio: 520 -> 590
2206 // Video: 500 -> 570
2207 // Buffered Range: 520 -> 570 (front overlap)
2208 AppendCluster(GenerateSingleStreamCluster(520, 590, kAudioTrackNum, 70));
2209 AppendCluster(GenerateSingleStreamCluster(500, 570, kVideoTrackNum, 70));
2211 CheckExpectedRanges("{ [0,23) [320,400) [520,570) }");
2213 // Audio: 720 -> 750
2214 // Video: 700 -> 770
2215 // Buffered Range: 720 -> 750 (complete overlap, audio)
2216 AppendCluster(GenerateSingleStreamCluster(720, 750, kAudioTrackNum, 30));
2217 AppendCluster(GenerateSingleStreamCluster(700, 770, kVideoTrackNum, 70));
2219 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) }");
2221 // Audio: 900 -> 970
2222 // Video: 920 -> 950
2223 // Buffered Range: 920 -> 950 (complete overlap, video)
2224 AppendCluster(GenerateSingleStreamCluster(900, 970, kAudioTrackNum, 70));
2225 AppendCluster(GenerateSingleStreamCluster(920, 950, kVideoTrackNum, 30));
2227 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2229 // Appending within buffered range should not affect buffered ranges.
2230 AppendCluster(GenerateSingleStreamCluster(930, 950, kAudioTrackNum, 20));
2231 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2233 // Appending to single stream outside buffered ranges should not affect
2234 // buffered ranges.
2235 AppendCluster(GenerateSingleStreamCluster(1230, 1240, kVideoTrackNum, 10));
2236 CheckExpectedRanges("{ [0,23) [320,400) [520,570) [720,750) [920,950) }");
2239 TEST_P(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
2240 EXPECT_CALL(host_, AddTextStream(_, _));
2241 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
2243 // Append audio & video data
2244 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23");
2245 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2247 // Verify that a text track with no cues does not result in an empty buffered
2248 // range.
2249 CheckExpectedRanges("{ [0,46) }");
2251 // Add some text cues.
2252 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K");
2254 // Verify that the new cues did not affect the buffered ranges.
2255 CheckExpectedRanges("{ [0,46) }");
2257 // Remove the buffered range.
2258 demuxer_->Remove(kSourceId, base::TimeDelta(),
2259 base::TimeDelta::FromMilliseconds(46));
2260 CheckExpectedRanges("{ }");
2263 // Once MarkEndOfStream() is called, GetBufferedRanges should not cut off any
2264 // over-hanging tails at the end of the ranges as this is likely due to block
2265 // duration differences.
2266 TEST_P(ChunkDemuxerTest, GetBufferedRanges_EndOfStream) {
2267 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2269 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K 23K");
2270 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "0K 33");
2272 CheckExpectedRanges("{ [0,46) }");
2274 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(66)));
2275 MarkEndOfStream(PIPELINE_OK);
2277 // Verify that the range extends to the end of the video data.
2278 CheckExpectedRanges("{ [0,66) }");
2280 // Verify that the range reverts to the intersection when end of stream
2281 // has been cancelled.
2282 demuxer_->UnmarkEndOfStream();
2283 CheckExpectedRanges("{ [0,46) }");
2285 // Append and remove data so that the 2 streams' end ranges do not overlap.
2287 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(246)));
2288 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(398)));
2289 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2290 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
2291 "200K 233 266 299 332K 365");
2293 // At this point, the per-stream ranges are as follows:
2294 // Audio: [0,46) [200,246)
2295 // Video: [0,66) [200,398)
2296 CheckExpectedRanges("{ [0,46) [200,246) }");
2298 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(200),
2299 base::TimeDelta::FromMilliseconds(300));
2301 // At this point, the per-stream ranges are as follows:
2302 // Audio: [0,46)
2303 // Video: [0,66) [332,398)
2304 CheckExpectedRanges("{ [0,46) }");
2306 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "200K 223K");
2307 AppendSingleStreamCluster(kSourceId, kVideoTrackNum, "200K 233");
2309 // At this point, the per-stream ranges are as follows:
2310 // Audio: [0,46) [200,246)
2311 // Video: [0,66) [200,266) [332,398)
2312   // NOTE: The last ranges on each stream do not overlap in time.
2313 CheckExpectedRanges("{ [0,46) [200,246) }");
2315 MarkEndOfStream(PIPELINE_OK);
2317 // NOTE: The last range on each stream gets extended to the highest
2318 // end timestamp according to the spec. The last audio range gets extended
2319 // from [200,246) to [200,398) which is why the intersection results in the
2320 // middle range getting larger AND the new range appearing.
2321 CheckExpectedRanges("{ [0,46) [200,266) [332,398) }");
2324 TEST_P(ChunkDemuxerTest, DifferentStreamTimecodes) {
2325 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2327 // Create a cluster where the video timecode begins 25ms after the audio.
2328 AppendCluster(GenerateCluster(0, 25, 8));
2330 Seek(base::TimeDelta::FromSeconds(0));
2331 GenerateExpectedReads(0, 25, 8);
2333 // Seek to 5 seconds.
2334 Seek(base::TimeDelta::FromSeconds(5));
2336 // Generate a cluster to fulfill this seek, where audio timecode begins 25ms
2337 // after the video.
2338 AppendCluster(GenerateCluster(5025, 5000, 8));
2339 GenerateExpectedReads(5025, 5000, 8);
2342 TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesSeparateSources) {
2343 std::string audio_id = "audio1";
2344 std::string video_id = "video1";
2345 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2347 // Generate two streams where the video stream starts 5ms after the audio
2348 // stream and append them.
2349 AppendCluster(audio_id, GenerateSingleStreamCluster(
2350 25, 4 * kAudioBlockDuration + 25, kAudioTrackNum, kAudioBlockDuration));
2351 AppendCluster(video_id, GenerateSingleStreamCluster(
2352 30, 4 * kVideoBlockDuration + 30, kVideoTrackNum, kVideoBlockDuration));
2354 // Both streams should be able to fulfill a seek to 25.
2355 Seek(base::TimeDelta::FromMilliseconds(25));
2356 GenerateAudioStreamExpectedReads(25, 4);
2357 GenerateVideoStreamExpectedReads(30, 4);
2360 TEST_P(ChunkDemuxerTest, DifferentStreamTimecodesOutOfRange) {
2361 std::string audio_id = "audio1";
2362 std::string video_id = "video1";
2363 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2365 // Generate two streams where the video stream starts 10s after the audio
2366 // stream and append them.
2367 AppendCluster(audio_id, GenerateSingleStreamCluster(0,
2368 4 * kAudioBlockDuration + 0, kAudioTrackNum, kAudioBlockDuration));
2369 AppendCluster(video_id, GenerateSingleStreamCluster(10000,
2370 4 * kVideoBlockDuration + 10000, kVideoTrackNum, kVideoBlockDuration));
2372 // Should not be able to fulfill a seek to 0.
2373 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(0);
2374 demuxer_->StartWaitingForSeek(seek_time);
2375 demuxer_->Seek(seek_time,
2376 NewExpectedStatusCB(PIPELINE_ERROR_ABORT));
2377 ExpectRead(DemuxerStream::AUDIO, 0);
2378 ExpectEndOfStream(DemuxerStream::VIDEO);
2381 TEST_P(ChunkDemuxerTest, ClusterWithNoBuffers) {
2382 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2384 // Generate and append an empty cluster beginning at 0.
2385 AppendEmptyCluster(0);
2387   // Sanity check that data can still be appended correctly after this cluster.
2388 AppendCluster(GenerateCluster(0, 2));
2389 ExpectRead(DemuxerStream::AUDIO, 0);
2390 ExpectRead(DemuxerStream::VIDEO, 0);
2393 TEST_P(ChunkDemuxerTest, CodecPrefixMatching) {
2394 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2396 #if defined(USE_PROPRIETARY_CODECS)
2397 expected = ChunkDemuxer::kOk;
2398 #endif
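  // "avc1.4D4041" is an MP4/H.264 codec string, so AddId() should only accept
  // it in builds with proprietary codecs enabled, matching |expected| above.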
2400 std::vector<std::string> codecs;
2401 codecs.push_back("avc1.4D4041");
2403 EXPECT_EQ(demuxer_->AddId("source_id", "video/mp4", codecs,
2404 use_legacy_frame_processor_),
2405 expected);
2408 // Test codec IDs that are not compliant with RFC6381, but have been
2409 // seen in the wild.
2410 TEST_P(ChunkDemuxerTest, CodecIDsThatAreNotRFC6381Compliant) {
2411 ChunkDemuxer::Status expected = ChunkDemuxer::kNotSupported;
2413 #if defined(USE_PROPRIETARY_CODECS)
2414 expected = ChunkDemuxer::kOk;
2415 #endif
2416 const char* codec_ids[] = {
2417 // GPAC places leading zeros on the audio object type.
2418 "mp4a.40.02",
2419 "mp4a.40.05"
2422 for (size_t i = 0; i < arraysize(codec_ids); ++i) {
2423 std::vector<std::string> codecs;
2424 codecs.push_back(codec_ids[i]);
2426 ChunkDemuxer::Status result =
2427 demuxer_->AddId("source_id", "audio/mp4", codecs,
2428 use_legacy_frame_processor_);
2430 EXPECT_EQ(result, expected)
2431         << "Failed to add codec_id '" << codec_ids[i] << "'";
2433 if (result == ChunkDemuxer::kOk)
2434 demuxer_->RemoveId("source_id");
2438 TEST_P(ChunkDemuxerTest, EndOfStreamStillSetAfterSeek) {
2439 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2441 EXPECT_CALL(host_, SetDuration(_))
2442 .Times(AnyNumber());
2444 base::TimeDelta kLastAudioTimestamp = base::TimeDelta::FromMilliseconds(92);
2445 base::TimeDelta kLastVideoTimestamp = base::TimeDelta::FromMilliseconds(99);
2447 AppendCluster(kDefaultFirstCluster());
2448 AppendCluster(kDefaultSecondCluster());
2449 MarkEndOfStream(PIPELINE_OK);
2451 DemuxerStream::Status status;
2452 base::TimeDelta last_timestamp;
2454 // Verify that we can read audio & video to the end w/o problems.
2455 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2456 EXPECT_EQ(DemuxerStream::kOk, status);
2457 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2459 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2460 EXPECT_EQ(DemuxerStream::kOk, status);
2461 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2463   // Seek back to 0 and verify that we can read to the end again.
2464 Seek(base::TimeDelta::FromMilliseconds(0));
2466 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2467 EXPECT_EQ(DemuxerStream::kOk, status);
2468 EXPECT_EQ(kLastAudioTimestamp, last_timestamp);
2470 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2471 EXPECT_EQ(DemuxerStream::kOk, status);
2472 EXPECT_EQ(kLastVideoTimestamp, last_timestamp);
2475 TEST_P(ChunkDemuxerTest, GetBufferedRangesBeforeInitSegment) {
2476 EXPECT_CALL(*this, DemuxerOpened());
2477 demuxer_->Initialize(&host_, CreateInitDoneCB(PIPELINE_OK), true);
2478 ASSERT_EQ(AddId("audio", HAS_AUDIO), ChunkDemuxer::kOk);
2479 ASSERT_EQ(AddId("video", HAS_VIDEO), ChunkDemuxer::kOk);
2481 CheckExpectedRanges("audio", "{ }");
2482 CheckExpectedRanges("video", "{ }");
2485 // Test that Seek() completes successfully when the first cluster
2486 // arrives.
2487 TEST_P(ChunkDemuxerTest, EndOfStreamDuringSeek) {
2488 InSequence s;
2490 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2492 AppendCluster(kDefaultFirstCluster());
2494 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(0);
2495 demuxer_->StartWaitingForSeek(seek_time);
2497 AppendCluster(kDefaultSecondCluster());
2498 EXPECT_CALL(host_, SetDuration(
2499 base::TimeDelta::FromMilliseconds(kDefaultSecondClusterEndTimestamp)));
2500 MarkEndOfStream(PIPELINE_OK);
2502 demuxer_->Seek(seek_time, NewExpectedStatusCB(PIPELINE_OK));
2504 GenerateExpectedReads(0, 4);
2505 GenerateExpectedReads(46, 66, 5);
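  // With all appended data consumed and end of stream marked, any further
  // reads should complete immediately with end-of-stream buffers.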
2507 EndOfStreamHelper end_of_stream_helper(demuxer_.get());
2508 end_of_stream_helper.RequestReads();
2509 end_of_stream_helper.CheckIfReadDonesWereCalled(true);
2512 TEST_P(ChunkDemuxerTest, ConfigChange_Video) {
2513 InSequence s;
2515 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2517 DemuxerStream::Status status;
2518 base::TimeDelta last_timestamp;
2520 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2522 // Fetch initial video config and verify it matches what we expect.
2523 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2524 ASSERT_TRUE(video_config_1.IsValidConfig());
2525 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2526 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2528 ExpectRead(DemuxerStream::VIDEO, 0);
2530 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2532 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2533 EXPECT_EQ(last_timestamp.InMilliseconds(), 501);
2535 // Fetch the new decoder config.
2536 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2537 ASSERT_TRUE(video_config_2.IsValidConfig());
2538 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2539 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2541 ExpectRead(DemuxerStream::VIDEO, 527);
2543 // Read until the next config change.
2544 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2545 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2546 EXPECT_EQ(last_timestamp.InMilliseconds(), 793);
2548 // Get the new config and verify that it matches the first one.
2549 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2551 ExpectRead(DemuxerStream::VIDEO, 801);
2553 // Read until the end of the stream just to make sure there aren't any other
2554 // config changes.
2555 ReadUntilNotOkOrEndOfStream(DemuxerStream::VIDEO, &status, &last_timestamp);
2556 ASSERT_EQ(status, DemuxerStream::kOk);
2559 TEST_P(ChunkDemuxerTest, ConfigChange_Audio) {
2560 InSequence s;
2562 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2564 DemuxerStream::Status status;
2565 base::TimeDelta last_timestamp;
2567 DemuxerStream* audio = demuxer_->GetStream(DemuxerStream::AUDIO);
2569 // Fetch initial audio config and verify it matches what we expect.
2570 const AudioDecoderConfig& audio_config_1 = audio->audio_decoder_config();
2571 ASSERT_TRUE(audio_config_1.IsValidConfig());
2572 EXPECT_EQ(audio_config_1.samples_per_second(), 44100);
2573 EXPECT_EQ(audio_config_1.extra_data_size(), 3863u);
2575 ExpectRead(DemuxerStream::AUDIO, 0);
2577 // The first config change seen is from a splice frame representing an overlap
2578   // of buffers from config 1 by buffers from config 2.
2579 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2580 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2581 EXPECT_EQ(last_timestamp.InMilliseconds(), 524);
2583 // Fetch the new decoder config.
2584 const AudioDecoderConfig& audio_config_2 = audio->audio_decoder_config();
2585 ASSERT_TRUE(audio_config_2.IsValidConfig());
2586 EXPECT_EQ(audio_config_2.samples_per_second(), 44100);
2587 EXPECT_EQ(audio_config_2.extra_data_size(), 3935u);
2589 // The next config change is from a splice frame representing an overlap of
2590 // buffers from config 2 by buffers from config 1.
2591 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2592 ASSERT_EQ(status, DemuxerStream::kConfigChanged);
2593 EXPECT_EQ(last_timestamp.InMilliseconds(), 782);
2594 ASSERT_TRUE(audio_config_1.Matches(audio->audio_decoder_config()));
2596 // Read until the end of the stream just to make sure there aren't any other
2597 // config changes.
2598 ReadUntilNotOkOrEndOfStream(DemuxerStream::AUDIO, &status, &last_timestamp);
2599 ASSERT_EQ(status, DemuxerStream::kOk);
2600 EXPECT_EQ(last_timestamp.InMilliseconds(), 2744);
2603 TEST_P(ChunkDemuxerTest, ConfigChange_Seek) {
2604 InSequence s;
2606 ASSERT_TRUE(InitDemuxerWithConfigChangeData());
2608 DemuxerStream* video = demuxer_->GetStream(DemuxerStream::VIDEO);
2610 // Fetch initial video config and verify it matches what we expect.
2611 const VideoDecoderConfig& video_config_1 = video->video_decoder_config();
2612 ASSERT_TRUE(video_config_1.IsValidConfig());
2613 EXPECT_EQ(video_config_1.natural_size().width(), 320);
2614 EXPECT_EQ(video_config_1.natural_size().height(), 240);
2616 ExpectRead(DemuxerStream::VIDEO, 0);
2618 // Seek to a location with a different config.
2619 Seek(base::TimeDelta::FromMilliseconds(527));
2621 // Verify that the config change is signalled.
2622 ExpectConfigChanged(DemuxerStream::VIDEO);
2624 // Fetch the new decoder config and verify it is what we expect.
2625 const VideoDecoderConfig& video_config_2 = video->video_decoder_config();
2626 ASSERT_TRUE(video_config_2.IsValidConfig());
2627 EXPECT_EQ(video_config_2.natural_size().width(), 640);
2628 EXPECT_EQ(video_config_2.natural_size().height(), 360);
2630 // Verify that Read() will return a buffer now.
2631 ExpectRead(DemuxerStream::VIDEO, 527);
2633 // Seek back to the beginning and verify we get another config change.
2634 Seek(base::TimeDelta::FromMilliseconds(0));
2635 ExpectConfigChanged(DemuxerStream::VIDEO);
2636 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2637 ExpectRead(DemuxerStream::VIDEO, 0);
2639 // Seek to a location that requires a config change and then
2640 // seek to a new location that has the same configuration as
2641 // the start of the file without a Read() in the middle.
2642 Seek(base::TimeDelta::FromMilliseconds(527));
2643 Seek(base::TimeDelta::FromMilliseconds(801));
2645 // Verify that no config change is signalled.
2646 ExpectRead(DemuxerStream::VIDEO, 801);
2647 ASSERT_TRUE(video_config_1.Matches(video->video_decoder_config()));
2650 TEST_P(ChunkDemuxerTest, TimestampPositiveOffset) {
2651 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
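  // With a +30 second timestamp offset, data appended with media timestamps
  // starting at 0 should be buffered, sought, and read back at 30000ms.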
2653 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(30)));
2654 AppendCluster(GenerateCluster(0, 2));
2656 Seek(base::TimeDelta::FromMilliseconds(30000));
2658 GenerateExpectedReads(30000, 2);
2661 TEST_P(ChunkDemuxerTest, TimestampNegativeOffset) {
2662 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2664 ASSERT_TRUE(SetTimestampOffset(kSourceId, base::TimeDelta::FromSeconds(-1)));
2665 AppendCluster(GenerateCluster(1000, 2));
2667 GenerateExpectedReads(0, 2);
2670 TEST_P(ChunkDemuxerTest, TimestampOffsetSeparateStreams) {
2671 std::string audio_id = "audio1";
2672 std::string video_id = "video1";
2673 ASSERT_TRUE(InitDemuxerAudioAndVideoSources(audio_id, video_id));
2675 ASSERT_TRUE(SetTimestampOffset(
2676 audio_id, base::TimeDelta::FromMilliseconds(-2500)));
2677 ASSERT_TRUE(SetTimestampOffset(
2678 video_id, base::TimeDelta::FromMilliseconds(-2500)));
2679 AppendCluster(audio_id, GenerateSingleStreamCluster(2500,
2680 2500 + kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2681 AppendCluster(video_id, GenerateSingleStreamCluster(2500,
2682 2500 + kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2683 GenerateAudioStreamExpectedReads(0, 4);
2684 GenerateVideoStreamExpectedReads(0, 4);
2686 Seek(base::TimeDelta::FromMilliseconds(27300));
2688 ASSERT_TRUE(SetTimestampOffset(
2689 audio_id, base::TimeDelta::FromMilliseconds(27300)));
2690 ASSERT_TRUE(SetTimestampOffset(
2691 video_id, base::TimeDelta::FromMilliseconds(27300)));
2692 AppendCluster(audio_id, GenerateSingleStreamCluster(
2693 0, kAudioBlockDuration * 4, kAudioTrackNum, kAudioBlockDuration));
2694 AppendCluster(video_id, GenerateSingleStreamCluster(
2695 0, kVideoBlockDuration * 4, kVideoTrackNum, kVideoBlockDuration));
2696 GenerateVideoStreamExpectedReads(27300, 4);
2697 GenerateAudioStreamExpectedReads(27300, 4);
2700 TEST_P(ChunkDemuxerTest, IsParsingMediaSegmentMidMediaSegment) {
2701 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2703 scoped_ptr<Cluster> cluster = GenerateCluster(0, 2);
2704 // Append only part of the cluster data.
2705 AppendData(cluster->data(), cluster->size() - 13);
2707 // Confirm we're in the middle of parsing a media segment.
2708 ASSERT_TRUE(demuxer_->IsParsingMediaSegment(kSourceId));
2710 demuxer_->Abort(kSourceId);
2711 // After Abort(), parsing should no longer be in the middle of a media
2712 // segment.
2713 ASSERT_FALSE(demuxer_->IsParsingMediaSegment(kSourceId));
2716 TEST_P(ChunkDemuxerTest, WebMIsParsingMediaSegmentDetection) {
2717 // TODO(wolenetz): Also test 'unknown' sized clusters.
2718 // See http://crbug.com/335676.
2719 const uint8 kBuffer[] = {
2720 0x1F, 0x43, 0xB6, 0x75, 0x83, // CLUSTER (size = 3)
2721 0xE7, 0x81, 0x01, // Cluster TIMECODE (value = 1)
2724   // This array indicates the expected return value of IsParsingMediaSegment()
2725 // following each incrementally appended byte in |kBuffer|.
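  // Parsing is considered to be inside a media segment once the complete
  // 5-byte cluster header (ID plus size) has been appended, and ends again
  // after the last byte of the 3-byte cluster payload.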
2726 const bool kExpectedReturnValues[] = {
2727 false, false, false, false, true,
2728 true, true, false,
2731 COMPILE_ASSERT(arraysize(kBuffer) == arraysize(kExpectedReturnValues),
2732 test_arrays_out_of_sync);
2733 COMPILE_ASSERT(arraysize(kBuffer) == sizeof(kBuffer), not_one_byte_per_index);
2735 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2737 for (size_t i = 0; i < sizeof(kBuffer); i++) {
2738 DVLOG(3) << "Appending and testing index " << i;
2739 AppendData(kBuffer + i, 1);
2740 bool expected_return_value = kExpectedReturnValues[i];
2741 EXPECT_EQ(expected_return_value,
2742 demuxer_->IsParsingMediaSegment(kSourceId));
2746 TEST_P(ChunkDemuxerTest, DurationChange) {
2747 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2748 const int kStreamDuration = kDefaultDuration().InMilliseconds();
2750 // Add data leading up to the currently set duration.
2751 AppendCluster(GenerateCluster(kStreamDuration - kAudioBlockDuration,
2752 kStreamDuration - kVideoBlockDuration,
2753 2));
2755 CheckExpectedRanges(kSourceId, "{ [201191,201224) }");
2757 // Add data beginning at the currently set duration and expect a new duration
2758 // to be signaled. Note that the last video block will have a higher end
2759 // timestamp than the last audio block.
2760 // TODO(wolenetz): Compliant coded frame processor will emit a max of one
2761 // duration change per each ProcessFrames(). Remove the first expectation here
2762 // once compliant coded frame processor is used. See http://crbug.com/249422.
2763 const int kNewStreamDurationAudio = kStreamDuration + kAudioBlockDuration;
2764 EXPECT_CALL(host_, SetDuration(
2765 base::TimeDelta::FromMilliseconds(kNewStreamDurationAudio)));
2766 const int kNewStreamDurationVideo = kStreamDuration + kVideoBlockDuration;
2767 EXPECT_CALL(host_, SetDuration(
2768 base::TimeDelta::FromMilliseconds(kNewStreamDurationVideo)));
2769 AppendCluster(GenerateCluster(kDefaultDuration().InMilliseconds(), 2));
2771 CheckExpectedRanges(kSourceId, "{ [201191,201247) }");
2773 // Add more data to the end of each media type. Note that the last audio block
2774 // will have a higher end timestamp than the last video block.
2775 const int kFinalStreamDuration = kStreamDuration + kAudioBlockDuration * 3;
2776 EXPECT_CALL(host_, SetDuration(
2777 base::TimeDelta::FromMilliseconds(kFinalStreamDuration)));
2778 AppendCluster(GenerateCluster(kStreamDuration + kAudioBlockDuration,
2779 kStreamDuration + kVideoBlockDuration,
2780 3));
2782 // See that the range has increased appropriately (but not to the full
2783 // duration of 201293, since there is not enough video appended for that).
2784 CheckExpectedRanges(kSourceId, "{ [201191,201290) }");
2787 TEST_P(ChunkDemuxerTest, DurationChangeTimestampOffset) {
2788 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2790 ASSERT_TRUE(SetTimestampOffset(kSourceId, kDefaultDuration()));
2792 // TODO(wolenetz): Compliant coded frame processor will emit a max of one
2793 // duration change per each ProcessFrames(). Remove the first expectation here
2794 // once compliant coded frame processor is used. See http://crbug.com/249422.
2795 EXPECT_CALL(host_, SetDuration(
2796 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
2797 kAudioBlockDuration * 2)));
2798 EXPECT_CALL(host_, SetDuration(
2799 kDefaultDuration() + base::TimeDelta::FromMilliseconds(
2800 kVideoBlockDuration * 2)));
2801 AppendCluster(GenerateCluster(0, 4));
2804 TEST_P(ChunkDemuxerTest, EndOfStreamTruncateDuration) {
2805 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2807 AppendCluster(kDefaultFirstCluster());
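  // Marking end of stream with only the first cluster buffered should truncate
  // the duration down to the end of the appended data (66ms).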
2809 EXPECT_CALL(host_, SetDuration(
2810 base::TimeDelta::FromMilliseconds(kDefaultFirstClusterEndTimestamp)));
2811 MarkEndOfStream(PIPELINE_OK);
2815 TEST_P(ChunkDemuxerTest, ZeroLengthAppend) {
2816 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
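  // Appending zero bytes should be accepted as a harmless no-op.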
2817 AppendData(NULL, 0);
2820 TEST_P(ChunkDemuxerTest, AppendAfterEndOfStream) {
2821 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2823 EXPECT_CALL(host_, SetDuration(_))
2824 .Times(AnyNumber());
2826 AppendCluster(kDefaultFirstCluster());
2827 MarkEndOfStream(PIPELINE_OK);
2829 demuxer_->UnmarkEndOfStream();
2831 AppendCluster(kDefaultSecondCluster());
2832 MarkEndOfStream(PIPELINE_OK);
2835 // Test receiving a Shutdown() call before we get an Initialize()
2836 // call. This can happen if the video element gets destroyed before
2837 // the pipeline has a chance to initialize the demuxer.
2838 TEST_P(ChunkDemuxerTest, Shutdown_BeforeInitialize) {
2839 demuxer_->Shutdown();
2840 demuxer_->Initialize(
2841 &host_, CreateInitDoneCB(DEMUXER_ERROR_COULD_NOT_OPEN), true);
2842 message_loop_.RunUntilIdle();
2845 TEST_P(ChunkDemuxerTest, ReadAfterAudioDisabled) {
2846 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2847 AppendCluster(kDefaultFirstCluster());
2849 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
2850 ASSERT_TRUE(stream);
2852 // The stream should no longer be present.
2853 demuxer_->OnAudioRendererDisabled();
2854 ASSERT_FALSE(demuxer_->GetStream(DemuxerStream::AUDIO));
2856 // Normally this would return an audio buffer at timestamp zero, but
2857 // all reads should return EOS buffers when disabled.
2858 bool audio_read_done = false;
2859 stream->Read(base::Bind(&OnReadDone_EOSExpected, &audio_read_done));
2860 message_loop_.RunUntilIdle();
2862 EXPECT_TRUE(audio_read_done);
2865 // Verifies that signaling end of stream while stalled at a gap
2866 // boundary does not trigger end of stream buffers to be returned.
2867 TEST_P(ChunkDemuxerTest, EndOfStreamWhileWaitingForGapToBeFilled) {
2868 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2870 AppendCluster(0, 10);
2871 AppendCluster(300, 10);
2872 CheckExpectedRanges(kSourceId, "{ [0,132) [300,432) }");
2874 GenerateExpectedReads(0, 10);
2876 bool audio_read_done = false;
2877 bool video_read_done = false;
2878 ReadAudio(base::Bind(&OnReadDone,
2879 base::TimeDelta::FromMilliseconds(138),
2880 &audio_read_done));
2881 ReadVideo(base::Bind(&OnReadDone,
2882 base::TimeDelta::FromMilliseconds(138),
2883 &video_read_done));
2885   // Verify that the reads didn't complete.
2886 EXPECT_FALSE(audio_read_done);
2887 EXPECT_FALSE(video_read_done);
2889 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(438)));
2890 MarkEndOfStream(PIPELINE_OK);
2892 // Verify that the reads still haven't completed.
2893 EXPECT_FALSE(audio_read_done);
2894 EXPECT_FALSE(video_read_done);
2896 demuxer_->UnmarkEndOfStream();
2898 AppendCluster(138, 22);
2900 message_loop_.RunUntilIdle();
2902 CheckExpectedRanges(kSourceId, "{ [0,435) }");
2904 // Verify that the reads have completed.
2905 EXPECT_TRUE(audio_read_done);
2906 EXPECT_TRUE(video_read_done);
2908 // Read the rest of the buffers.
2909 GenerateExpectedReads(161, 171, 20);
2911 // Verify that reads block because the append cleared the end of stream state.
2912 audio_read_done = false;
2913 video_read_done = false;
2914 ReadAudio(base::Bind(&OnReadDone_EOSExpected,
2915 &audio_read_done));
2916 ReadVideo(base::Bind(&OnReadDone_EOSExpected,
2917 &video_read_done));
2919 // Verify that the reads don't complete.
2920 EXPECT_FALSE(audio_read_done);
2921 EXPECT_FALSE(video_read_done);
2923 EXPECT_CALL(host_, SetDuration(base::TimeDelta::FromMilliseconds(437)));
2924 MarkEndOfStream(PIPELINE_OK);
2926 EXPECT_TRUE(audio_read_done);
2927 EXPECT_TRUE(video_read_done);
2930 TEST_P(ChunkDemuxerTest, CanceledSeekDuringInitialPreroll) {
2931 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
2933 // Cancel preroll.
2934 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(200);
2935 demuxer_->CancelPendingSeek(seek_time);
2937 // Initiate the seek to the new location.
2938 Seek(seek_time);
2940 // Append data to satisfy the seek.
2941 AppendCluster(seek_time.InMilliseconds(), 10);
2944 TEST_P(ChunkDemuxerTest, GCDuringSeek) {
2945 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
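  // Cap the audio buffer at five blocks so that later appends trigger the
  // garbage collection logic exercised below.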
2947 demuxer_->SetMemoryLimitsForTesting(5 * kBlockSize);
2949 base::TimeDelta seek_time1 = base::TimeDelta::FromMilliseconds(1000);
2950 base::TimeDelta seek_time2 = base::TimeDelta::FromMilliseconds(500);
2952 // Initiate a seek to |seek_time1|.
2953 Seek(seek_time1);
2955 // Append data to satisfy the first seek request.
2956 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
2957 seek_time1.InMilliseconds(), 5);
2958 CheckExpectedRanges(kSourceId, "{ [1000,1115) }");
2960 // Signal that the second seek is starting.
2961 demuxer_->StartWaitingForSeek(seek_time2);
2963 // Append data to satisfy the second seek. This append triggers
2964 // the garbage collection logic since we set the memory limit to
2965 // 5 blocks.
2966 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
2967 seek_time2.InMilliseconds(), 5);
2969 // Verify that the buffers that cover |seek_time2| do not get
2970 // garbage collected.
2971 CheckExpectedRanges(kSourceId, "{ [500,615) }");
2973 // Complete the seek.
2974 demuxer_->Seek(seek_time2, NewExpectedStatusCB(PIPELINE_OK));
2977 // Append more data and make sure that the blocks for |seek_time2|
2978 // don't get removed.
2980 // NOTE: The current GC algorithm tries to preserve the GOP at the
2981 // current position as well as the last appended GOP. This is
2982 // why there are 2 ranges in the expectations.
2983 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, 700, 5);
2984 CheckExpectedRanges(kSourceId, "{ [500,592) [792,815) }");
2987 TEST_P(ChunkDemuxerTest, RemoveBeforeInitSegment) {
2988 EXPECT_CALL(*this, DemuxerOpened());
2989 demuxer_->Initialize(
2990 &host_, CreateInitDoneCB(kNoTimestamp(), PIPELINE_OK), true);
2992 EXPECT_EQ(ChunkDemuxer::kOk, AddId(kSourceId, HAS_AUDIO | HAS_VIDEO));
2994 demuxer_->Remove(kSourceId, base::TimeDelta::FromMilliseconds(0),
2995 base::TimeDelta::FromMilliseconds(1));
2998 TEST_P(ChunkDemuxerTest, AppendWindow_Video) {
2999 ASSERT_TRUE(InitDemuxer(HAS_VIDEO));
3000 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3002 // Set the append window to [20,280).
3003 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3004 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3006 // Append a cluster that starts before and ends after the append window.
3007 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3008 "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3010 // Verify that GOPs that start outside the window are not included
3011 // in the buffer. Also verify that buffers that start inside the
3012 // window and extend beyond the end of the window are not included.
3013 CheckExpectedRanges(kSourceId, "{ [120,270) }");
3014 CheckExpectedBuffers(stream, "120 150 180 210 240");
3016 // Extend the append window to [20,650).
3017 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3019   // Append more data and verify that the added buffers start at the next
3020 // keyframe.
3021 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3022 "360 390 420K 450 480 510 540K 570 600 630K");
3023 CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3026 TEST_P(ChunkDemuxerTest, AppendWindow_Audio) {
3027 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3028 DemuxerStream* stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3030 // Set the append window to [20,280).
3031 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3032 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3034 // Append a cluster that starts before and ends after the append window.
3035 AppendSingleStreamCluster(
3036 kSourceId, kAudioTrackNum,
3037 "0K 30K 60K 90K 120K 150K 180K 210K 240K 270K 300K 330K");
3039 // Verify that frames that end outside the window are not included
3040 // in the buffer. Also verify that buffers that start inside the
3041 // window and extend beyond the end of the window are not included.
3043 // The first 20ms of the first buffer should be trimmed off since it
3044 // overlaps the start of the append window.
3045 CheckExpectedRanges(kSourceId, "{ [20,270) }");
3046 CheckExpectedBuffers(stream, "20 30 60 90 120 150 180 210 240");
3048 // Extend the append window to [20,650).
3049 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3051 // Append more data and verify that a new range is created.
3052 AppendSingleStreamCluster(
3053 kSourceId, kAudioTrackNum,
3054 "360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
3055 CheckExpectedRanges(kSourceId, "{ [20,270) [360,630) }");
3058 TEST_P(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
3059 ASSERT_TRUE(InitDemuxer(HAS_AUDIO));
3061 // Set the append window to [10,20).
3062 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(10);
3063 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3065 // Append a cluster that starts before and ends after the append window.
3066 AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
3068 // Verify that everything is dropped in this case. No partial append should
3069 // be generated.
3070 CheckExpectedRanges(kSourceId, "{ }");
3073 TEST_P(ChunkDemuxerTest, AppendWindow_Text) {
3074 DemuxerStream* text_stream = NULL;
3075 EXPECT_CALL(host_, AddTextStream(_, _))
3076 .WillOnce(SaveArg<0>(&text_stream));
3077 ASSERT_TRUE(InitDemuxer(HAS_VIDEO | HAS_TEXT));
3078 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3080 // Set the append window to [20,280).
3081 append_window_start_for_next_append_ = base::TimeDelta::FromMilliseconds(20);
3082 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(280);
3084 // Append a cluster that starts before and ends after the append
3085 // window.
3086 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3087 "0K 30 60 90 120K 150 180 210 240K 270 300 330K");
3088 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K 300K");
3090 // Verify that text cues that start outside the window are not included
3091 // in the buffer. Also verify that cues that extend beyond the
3092 // window are not included.
3093 CheckExpectedRanges(kSourceId, "{ [120,270) }");
3094 CheckExpectedBuffers(video_stream, "120 150 180 210 240");
3095 CheckExpectedBuffers(text_stream, "100");
3097 // Extend the append window to [20,650).
3098 append_window_end_for_next_append_ = base::TimeDelta::FromMilliseconds(650);
3100 // Append more data and verify that a new range is created.
3101 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3102 "360 390 420K 450 480 510 540K 570 600 630K");
3103 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "400K 500K 600K 700K");
3104 CheckExpectedRanges(kSourceId, "{ [120,270) [420,630) }");
3106 // Seek to the new range and verify that the expected buffers are returned.
3107 Seek(base::TimeDelta::FromMilliseconds(420));
3108 CheckExpectedBuffers(video_stream, "420 450 480 510 540 570 600");
3109 CheckExpectedBuffers(text_stream, "400 500");
3112 TEST_P(ChunkDemuxerTest, StartWaitingForSeekAfterParseError) {
3113 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO));
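  // Appending garbage should signal PIPELINE_ERROR_DECODE; it should still be
  // safe to start waiting for a seek afterwards.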
3114 EXPECT_CALL(host_, OnDemuxerError(PIPELINE_ERROR_DECODE));
3115 AppendGarbage();
3116 base::TimeDelta seek_time = base::TimeDelta::FromSeconds(50);
3117 demuxer_->StartWaitingForSeek(seek_time);
3120 TEST_P(ChunkDemuxerTest, Remove_AudioVideoText) {
3121 DemuxerStream* text_stream = NULL;
3122 EXPECT_CALL(host_, AddTextStream(_, _))
3123 .WillOnce(SaveArg<0>(&text_stream));
3124 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3126 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3127 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3129 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3130 "0K 20K 40K 60K 80K 100K 120K 140K");
3131 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3132 "0K 30 60 90 120K 150 180");
3133 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "0K 100K 200K");
3135 CheckExpectedBuffers(audio_stream, "0 20 40 60 80 100 120 140");
3136 CheckExpectedBuffers(video_stream, "0 30 60 90 120 150 180");
3137 CheckExpectedBuffers(text_stream, "0 100 200");
3139 // Remove the buffers that were added.
3140 demuxer_->Remove(kSourceId, base::TimeDelta(),
3141 base::TimeDelta::FromMilliseconds(300));
3143 // Verify that all the appended data has been removed.
3144 CheckExpectedRanges(kSourceId, "{ }");
3146 // Append new buffers that are clearly different than the original
3147 // ones and verify that only the new buffers are returned.
3148 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3149 "1K 21K 41K 61K 81K 101K 121K 141K");
3150 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3151 "1K 31 61 91 121K 151 181");
3152 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "1K 101K 201K");
3154 Seek(base::TimeDelta());
3155 CheckExpectedBuffers(audio_stream, "1 21 41 61 81 101 121 141");
3156 CheckExpectedBuffers(video_stream, "1 31 61 91 121 151 181");
3157 CheckExpectedBuffers(text_stream, "1 101 201");
3160 // Verifies that a Seek() will complete without text cues for
3161 // the seek point and will return cues after the seek position
3162 // when they are eventually appended.
3163 TEST_P(ChunkDemuxerTest, SeekCompletesWithoutTextCues) {
3164 DemuxerStream* text_stream = NULL;
3165 EXPECT_CALL(host_, AddTextStream(_, _))
3166 .WillOnce(SaveArg<0>(&text_stream));
3167 ASSERT_TRUE(InitDemuxer(HAS_AUDIO | HAS_VIDEO | HAS_TEXT));
3169 DemuxerStream* audio_stream = demuxer_->GetStream(DemuxerStream::AUDIO);
3170 DemuxerStream* video_stream = demuxer_->GetStream(DemuxerStream::VIDEO);
3172 base::TimeDelta seek_time = base::TimeDelta::FromMilliseconds(120);
3173 bool seek_cb_was_called = false;
3174 demuxer_->StartWaitingForSeek(seek_time);
3175 demuxer_->Seek(seek_time,
3176 base::Bind(OnSeekDone_OKExpected, &seek_cb_was_called));
3177 message_loop_.RunUntilIdle();
3179 EXPECT_FALSE(seek_cb_was_called);
3181 bool text_read_done = false;
3182 text_stream->Read(base::Bind(&OnReadDone,
3183 base::TimeDelta::FromMilliseconds(125),
3184 &text_read_done));
3186 // Append audio & video data so the seek completes.
3187 AppendSingleStreamCluster(kSourceId, kAudioTrackNum,
3188 "0K 20K 40K 60K 80K 100K 120K 140K 160K 180K");
3189 AppendSingleStreamCluster(kSourceId, kVideoTrackNum,
3190 "0K 30 60 90 120K 150 180 210");
3192 message_loop_.RunUntilIdle();
3193 EXPECT_TRUE(seek_cb_was_called);
3194 EXPECT_FALSE(text_read_done);
3196 // Read some audio & video buffers to further verify seek completion.
3197 CheckExpectedBuffers(audio_stream, "120 140");
3198 CheckExpectedBuffers(video_stream, "120 150");
3200 EXPECT_FALSE(text_read_done);
3202 // Append text cues that start after the seek point and verify that
3203 // they are returned by Read() calls.
3204 AppendSingleStreamCluster(kSourceId, kTextTrackNum, "125K 175K 225K");
3206 message_loop_.RunUntilIdle();
3207 EXPECT_TRUE(text_read_done);
3209   // NOTE: We start at 175 here because the buffer at 125 was already returned
3210   // to satisfy the pending text read initiated above.
3211 CheckExpectedBuffers(text_stream, "175 225");
3213 // Verify that audio & video streams continue to return expected values.
3214 CheckExpectedBuffers(audio_stream, "160 180");
3215 CheckExpectedBuffers(video_stream, "180 210");
3218 // TODO(wolenetz): Enable testing of new frame processor based on this flag,
3219 // once the new processor has landed. See http://crbug.com/249422.
3220 INSTANTIATE_TEST_CASE_P(LegacyFrameProcessor, ChunkDemuxerTest, Values(true));
3222 } // namespace media