// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/filters/frame_processor.h"

#include <cstdlib>

#include "base/stl_util.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/timestamp_constants.h"

namespace media {
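
// Caps on the number of times each of the LIMITED_MEDIA_LOG warnings below is
// emitted to the media log.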
const int kMaxDroppedPrerollWarnings = 10;
const int kMaxDtsBeyondPtsWarnings = 10;

// Helper class to capture per-track details needed by a frame processor. Some
// of this information may be duplicated in the short-term in the associated
// ChunkDemuxerStream and SourceBufferStream for a track.
// This parallels each of a SourceBuffer's Track Buffers in the MSE spec at
// http://www.w3.org/TR/media-source/#track-buffers.
class MseTrackBuffer {
 public:
  explicit MseTrackBuffer(ChunkDemuxerStream* stream);
  ~MseTrackBuffer();

  // Get/set |last_decode_timestamp_|.
  DecodeTimestamp last_decode_timestamp() const {
    return last_decode_timestamp_;
  }
  void set_last_decode_timestamp(DecodeTimestamp timestamp) {
    last_decode_timestamp_ = timestamp;
  }

  // Get/set |last_frame_duration_|.
  base::TimeDelta last_frame_duration() const {
    return last_frame_duration_;
  }
  void set_last_frame_duration(base::TimeDelta duration) {
    last_frame_duration_ = duration;
  }

  // Gets |highest_presentation_timestamp_|.
  base::TimeDelta highest_presentation_timestamp() const {
    return highest_presentation_timestamp_;
  }

  // Get/set |needs_random_access_point_|.
  bool needs_random_access_point() const {
    return needs_random_access_point_;
  }
  void set_needs_random_access_point(bool needs_random_access_point) {
    needs_random_access_point_ = needs_random_access_point;
  }

  // Gets a pointer to this track's ChunkDemuxerStream.
  ChunkDemuxerStream* stream() const { return stream_; }

  // Unsets |last_decode_timestamp_|, unsets |last_frame_duration_|,
  // unsets |highest_presentation_timestamp_|, and sets
  // |needs_random_access_point_| to true.
  void Reset();

  // If |highest_presentation_timestamp_| is unset or |timestamp| is greater
  // than |highest_presentation_timestamp_|, sets
  // |highest_presentation_timestamp_| to |timestamp|. Note that bidirectional
  // prediction between coded frames can cause |timestamp| to not be
  // monotonically increasing even though the decode timestamps are
  // monotonically increasing.
  void SetHighestPresentationTimestampIfIncreased(base::TimeDelta timestamp);

  // Adds |frame| to the end of |processed_frames_|.
  void EnqueueProcessedFrame(const scoped_refptr<StreamParserBuffer>& frame);

  // Appends |processed_frames_|, if not empty, to |stream_| and clears
  // |processed_frames_|. Returns false if append failed, true otherwise.
  // |processed_frames_| is cleared in both cases.
  bool FlushProcessedFrames();

 private:
  // The decode timestamp of the last coded frame appended in the current coded
  // frame group. Initially kNoDecodeTimestamp(), meaning "unset".
  DecodeTimestamp last_decode_timestamp_;

  // The coded frame duration of the last coded frame appended in the current
  // coded frame group. Initially kNoTimestamp(), meaning "unset".
  base::TimeDelta last_frame_duration_;

  // The highest presentation timestamp encountered in a coded frame appended
  // in the current coded frame group. Initially kNoTimestamp(), meaning
  // "unset".
  base::TimeDelta highest_presentation_timestamp_;

  // Keeps track of whether the track buffer is waiting for a random access
  // point coded frame. Initially set to true to indicate that a random access
  // point coded frame is needed before anything can be added to the track
  // buffer.
  bool needs_random_access_point_;

  // Pointer to the stream associated with this track. The stream is not owned
  // by |this|.
  ChunkDemuxerStream* const stream_;

  // Queue of processed frames that have not yet been appended to |stream_|.
  // EnqueueProcessedFrame() adds to this queue, and FlushProcessedFrames()
  // clears it.
  StreamParser::BufferQueue processed_frames_;

  DISALLOW_COPY_AND_ASSIGN(MseTrackBuffer);
};

MseTrackBuffer::MseTrackBuffer(ChunkDemuxerStream* stream)
    : last_decode_timestamp_(kNoDecodeTimestamp()),
      last_frame_duration_(kNoTimestamp()),
      highest_presentation_timestamp_(kNoTimestamp()),
      needs_random_access_point_(true),
      stream_(stream) {
  DCHECK(stream_);
}

MseTrackBuffer::~MseTrackBuffer() {
  DVLOG(2) << __FUNCTION__ << "()";
}

void MseTrackBuffer::Reset() {
  DVLOG(2) << __FUNCTION__ << "()";

  last_decode_timestamp_ = kNoDecodeTimestamp();
  last_frame_duration_ = kNoTimestamp();
  highest_presentation_timestamp_ = kNoTimestamp();
  needs_random_access_point_ = true;
}

void MseTrackBuffer::SetHighestPresentationTimestampIfIncreased(
    base::TimeDelta timestamp) {
  if (highest_presentation_timestamp_ == kNoTimestamp() ||
      timestamp > highest_presentation_timestamp_) {
    highest_presentation_timestamp_ = timestamp;
  }
}

void MseTrackBuffer::EnqueueProcessedFrame(
    const scoped_refptr<StreamParserBuffer>& frame) {
  processed_frames_.push_back(frame);
}

bool MseTrackBuffer::FlushProcessedFrames() {
  if (processed_frames_.empty())
    return true;

  bool result = stream_->Append(processed_frames_);
  processed_frames_.clear();

  DVLOG_IF(3, !result) << __FUNCTION__
                       << "(): Failure appending processed frames to stream";

  return result;
}

FrameProcessor::FrameProcessor(const UpdateDurationCB& update_duration_cb,
                               const scoped_refptr<MediaLog>& media_log)
    : group_start_timestamp_(kNoTimestamp()),
      update_duration_cb_(update_duration_cb),
      media_log_(media_log) {
  DVLOG(2) << __FUNCTION__ << "()";
  DCHECK(!update_duration_cb.is_null());
}

FrameProcessor::~FrameProcessor() {
  DVLOG(2) << __FUNCTION__ << "()";
  STLDeleteValues(&track_buffers_);
}

void FrameProcessor::SetSequenceMode(bool sequence_mode) {
  DVLOG(2) << __FUNCTION__ << "(" << sequence_mode << ")";

  // Per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/media-source.html#widl-SourceBuffer-mode
  // Step 7: If the new mode equals "sequence", then set the group start
  // timestamp to the group end timestamp.
  if (sequence_mode) {
    DCHECK(kNoTimestamp() != group_end_timestamp_);
    group_start_timestamp_ = group_end_timestamp_;
  }

  // Step 8: Update the attribute to new mode.
  sequence_mode_ = sequence_mode;
}

bool FrameProcessor::ProcessFrames(
    const StreamParser::BufferQueue& audio_buffers,
    const StreamParser::BufferQueue& video_buffers,
    const StreamParser::TextBufferQueueMap& text_map,
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    bool* new_media_segment,
    base::TimeDelta* timestamp_offset) {
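  // Merge the per-type buffer queues into one queue ordered by decode
  // timestamp; a failure indicates the parsed buffers were not in DTS
  // sequence.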
  StreamParser::BufferQueue frames;
  if (!MergeBufferQueues(audio_buffers, video_buffers, text_map, &frames)) {
    MEDIA_LOG(ERROR, media_log_) << "Parsed buffers not in DTS sequence";
    return false;
  }

  DCHECK(!frames.empty());

  // Implements the coded frame processing algorithm's outer loop for step 1.
  // Note that ProcessFrame() implements an inner loop for a single frame that
  // handles "jump to the Loop Top step to restart processing of the current
  // coded frame" per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
  // media-source.html#sourcebuffer-coded-frame-processing
  // 1. For each coded frame in the media segment run the following steps:
  for (StreamParser::BufferQueue::const_iterator frames_itr = frames.begin();
       frames_itr != frames.end(); ++frames_itr) {
    if (!ProcessFrame(*frames_itr, append_window_start, append_window_end,
                      timestamp_offset, new_media_segment)) {
      FlushProcessedFrames();
      return false;
    }
  }

  if (!FlushProcessedFrames())
    return false;

  // 2. - 4. Are handled by the WebMediaPlayer / Pipeline / Media Element.

  // Step 5:
  update_duration_cb_.Run(group_end_timestamp_);

  return true;
}

void FrameProcessor::SetGroupStartTimestampIfInSequenceMode(
    base::TimeDelta timestamp_offset) {
  DVLOG(2) << __FUNCTION__ << "(" << timestamp_offset.InSecondsF() << ")";
  DCHECK(kNoTimestamp() != timestamp_offset);
  if (sequence_mode_)
    group_start_timestamp_ = timestamp_offset;

  // Changes to timestampOffset should invalidate the preroll buffer.
  audio_preroll_buffer_ = NULL;
}

bool FrameProcessor::AddTrack(StreamParser::TrackId id,
                              ChunkDemuxerStream* stream) {
  DVLOG(2) << __FUNCTION__ << "(): id=" << id;

  MseTrackBuffer* existing_track = FindTrack(id);
  DCHECK(!existing_track);
  if (existing_track) {
    MEDIA_LOG(ERROR, media_log_) << "Failure adding track with duplicate ID "
                                 << id;
    return false;
  }

  track_buffers_[id] = new MseTrackBuffer(stream);
  return true;
}

bool FrameProcessor::UpdateTrack(StreamParser::TrackId old_id,
                                 StreamParser::TrackId new_id) {
  DVLOG(2) << __FUNCTION__ << "() : old_id=" << old_id << ", new_id=" << new_id;

  if (old_id == new_id || !FindTrack(old_id) || FindTrack(new_id)) {
    MEDIA_LOG(ERROR, media_log_) << "Failure updating track id from " << old_id
                                 << " to " << new_id;
    return false;
  }

  track_buffers_[new_id] = track_buffers_[old_id];
  CHECK_EQ(1u, track_buffers_.erase(old_id));
  return true;
}

void FrameProcessor::SetAllTrackBuffersNeedRandomAccessPoint() {
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    itr->second->set_needs_random_access_point(true);
  }
}

void FrameProcessor::Reset() {
  DVLOG(2) << __FUNCTION__ << "()";
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end(); ++itr) {
    itr->second->Reset();
  }

  if (sequence_mode_) {
    DCHECK(kNoTimestamp() != group_end_timestamp_);
    group_start_timestamp_ = group_end_timestamp_;
  }
}

void FrameProcessor::OnPossibleAudioConfigUpdate(
    const AudioDecoderConfig& config) {
  DCHECK(config.IsValidConfig());

  // Always clear the preroll buffer when a config update is received.
  audio_preroll_buffer_ = NULL;

  if (config.Matches(current_audio_config_))
    return;

  current_audio_config_ = config;
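  // Cache the duration of a single sample at the new sample rate. This is the
  // tolerance HandlePartialAppendWindowTrimming() uses when deciding whether a
  // saved preroll buffer directly precedes a trimmed buffer.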
  sample_duration_ = base::TimeDelta::FromSecondsD(
      1.0 / current_audio_config_.samples_per_second());
}

MseTrackBuffer* FrameProcessor::FindTrack(StreamParser::TrackId id) {
  TrackBufferMap::iterator itr = track_buffers_.find(id);
  if (itr == track_buffers_.end())
    return NULL;

  return itr->second;
}

void FrameProcessor::NotifyNewMediaSegmentStarting(
    DecodeTimestamp segment_timestamp) {
  DVLOG(2) << __FUNCTION__ << "(" << segment_timestamp.InSecondsF() << ")";

  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    itr->second->stream()->OnNewMediaSegment(segment_timestamp);
  }
}

bool FrameProcessor::FlushProcessedFrames() {
  DVLOG(2) << __FUNCTION__ << "()";

  bool result = true;
  for (TrackBufferMap::iterator itr = track_buffers_.begin();
       itr != track_buffers_.end();
       ++itr) {
    if (!itr->second->FlushProcessedFrames())
      result = false;
  }

  return result;
}

bool FrameProcessor::HandlePartialAppendWindowTrimming(
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    const scoped_refptr<StreamParserBuffer>& buffer) {
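  // Only audio key frames with a known positive duration reach this point;
  // the caller gates on supports_partial_append_window_trimming().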
  DCHECK(buffer->duration() > base::TimeDelta());
  DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
  DCHECK(buffer->is_key_frame());

  const base::TimeDelta frame_end_timestamp =
      buffer->timestamp() + buffer->duration();

  // If the buffer is entirely before |append_window_start|, save it as preroll
  // for the first buffer which overlaps |append_window_start|.
  if (buffer->timestamp() < append_window_start &&
      frame_end_timestamp <= append_window_start) {
    audio_preroll_buffer_ = buffer;
    return false;
  }

  // If the buffer is entirely after |append_window_end| there's nothing to do.
  if (buffer->timestamp() >= append_window_end)
    return false;

  DCHECK(buffer->timestamp() >= append_window_start ||
         frame_end_timestamp > append_window_start);
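
  // Set to true below if |buffer| is modified (preroll attached or a partial
  // discard applied); the caller re-reads the buffer's adjusted timestamps and
  // duration when this function returns true.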
  bool processed_buffer = false;

  // If we have a preroll buffer see if we can attach it to the first buffer
  // overlapping or after |append_window_start|.
  if (audio_preroll_buffer_.get()) {
    // We only want to use the preroll buffer if it directly precedes (less
    // than one sample apart) the current buffer.
    const int64 delta =
        (audio_preroll_buffer_->timestamp() +
         audio_preroll_buffer_->duration() - buffer->timestamp())
            .InMicroseconds();
    if (std::abs(delta) < sample_duration_.InMicroseconds()) {
      DVLOG(1) << "Attaching audio preroll buffer ["
               << audio_preroll_buffer_->timestamp().InSecondsF() << ", "
               << (audio_preroll_buffer_->timestamp() +
                   audio_preroll_buffer_->duration()).InSecondsF() << ") to "
               << buffer->timestamp().InSecondsF();
      buffer->SetPrerollBuffer(audio_preroll_buffer_);
      processed_buffer = true;
    } else {
      LIMITED_MEDIA_LOG(DEBUG, media_log_, num_dropped_preroll_warnings_,
                        kMaxDroppedPrerollWarnings)
          << "Partial append window trimming dropping unused audio preroll "
             "buffer with PTS "
          << audio_preroll_buffer_->timestamp().InMicroseconds()
          << "us that ends too far (" << delta
          << "us) from next buffer with PTS "
          << buffer->timestamp().InMicroseconds() << "us";
    }
    audio_preroll_buffer_ = NULL;
  }

  // See if a partial discard can be done around |append_window_start|.
  if (buffer->timestamp() < append_window_start) {
    DVLOG(1) << "Truncating buffer which overlaps append window start."
             << " presentation_timestamp " << buffer->timestamp().InSecondsF()
             << " frame_end_timestamp " << frame_end_timestamp.InSecondsF()
             << " append_window_start " << append_window_start.InSecondsF();

    // Mark the overlapping portion of the buffer for discard.
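    // The discard padding is a (front, back) pair of durations; here only the
    // leading portion preceding |append_window_start| is marked.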
    buffer->set_discard_padding(std::make_pair(
        append_window_start - buffer->timestamp(), base::TimeDelta()));

    // Adjust the timestamp of this buffer forward to |append_window_start| and
    // decrease the duration to compensate. Adjust DTS by the same delta as PTS
    // to help prevent spurious discontinuities when DTS > PTS.
    base::TimeDelta pts_delta = append_window_start - buffer->timestamp();
    buffer->set_timestamp(append_window_start);
    buffer->SetDecodeTimestamp(buffer->GetDecodeTimestamp() + pts_delta);
    buffer->set_duration(frame_end_timestamp - append_window_start);
    processed_buffer = true;
  }

  // See if a partial discard can be done around |append_window_end|.
  if (frame_end_timestamp > append_window_end) {
    DVLOG(1) << "Truncating buffer which overlaps append window end."
             << " presentation_timestamp " << buffer->timestamp().InSecondsF()
             << " frame_end_timestamp " << frame_end_timestamp.InSecondsF()
             << " append_window_end " << append_window_end.InSecondsF();

    // Mark the overlapping portion of the buffer for discard.
    buffer->set_discard_padding(
        std::make_pair(buffer->discard_padding().first,
                       frame_end_timestamp - append_window_end));

    // Decrease the duration of the buffer to remove the discarded portion.
    buffer->set_duration(append_window_end - buffer->timestamp());
    processed_buffer = true;
  }

  return processed_buffer;
}

bool FrameProcessor::ProcessFrame(
    const scoped_refptr<StreamParserBuffer>& frame,
    base::TimeDelta append_window_start,
    base::TimeDelta append_window_end,
    base::TimeDelta* timestamp_offset,
    bool* new_media_segment) {
  // Implements the loop within step 1 of the coded frame processing algorithm
  // for a single input frame per April 1, 2014 MSE spec editor's draft:
  // https://dvcs.w3.org/hg/html-media/raw-file/d471a4412040/media-source/
  // media-source.html#sourcebuffer-coded-frame-processing

  while (true) {
    // 1. Loop Top: Let presentation timestamp be a double precision floating
    //    point representation of the coded frame's presentation timestamp in
    //    seconds.
    // 2. Let decode timestamp be a double precision floating point
    //    representation of the coded frame's decode timestamp in seconds.
    // 3. Let frame duration be a double precision floating point
    //    representation of the coded frame's duration in seconds.
    // We use base::TimeDelta and DecodeTimestamp instead of double.
    base::TimeDelta presentation_timestamp = frame->timestamp();
    DecodeTimestamp decode_timestamp = frame->GetDecodeTimestamp();
    base::TimeDelta frame_duration = frame->duration();

    DVLOG(3) << __FUNCTION__ << ": Processing frame "
             << "Type=" << frame->type()
             << ", TrackID=" << frame->track_id()
             << ", PTS=" << presentation_timestamp.InSecondsF()
             << ", DTS=" << decode_timestamp.InSecondsF()
             << ", DUR=" << frame_duration.InSecondsF()
             << ", RAP=" << frame->is_key_frame();

    // Sanity check the timestamps.
    if (presentation_timestamp == kNoTimestamp()) {
      MEDIA_LOG(ERROR, media_log_) << "Unknown PTS for " << frame->GetTypeName()
                                   << " frame";
      return false;
    }
    if (decode_timestamp == kNoDecodeTimestamp()) {
      MEDIA_LOG(ERROR, media_log_) << "Unknown DTS for " << frame->GetTypeName()
                                   << " frame";
      return false;
    }
    if (decode_timestamp.ToPresentationTime() > presentation_timestamp) {
      // TODO(wolenetz): Determine whether DTS>PTS should really be allowed.
      // See http://crbug.com/354518.
      LIMITED_MEDIA_LOG(DEBUG, media_log_, num_dts_beyond_pts_warnings_,
                        kMaxDtsBeyondPtsWarnings)
          << "Parsed " << frame->GetTypeName() << " frame has DTS "
          << decode_timestamp.InMicroseconds()
          << "us, which is after the frame's PTS "
          << presentation_timestamp.InMicroseconds() << "us";
      DVLOG(2) << __FUNCTION__ << ": WARNING: Frame DTS("
               << decode_timestamp.InSecondsF() << ") > PTS("
               << presentation_timestamp.InSecondsF()
               << "), frame type=" << frame->GetTypeName();
    }

    // TODO(acolwell/wolenetz): All stream parsers must emit valid (positive)
    // frame durations. For now, we allow non-negative frame duration.
    // See http://crbug.com/351166.
    if (frame_duration == kNoTimestamp()) {
      MEDIA_LOG(ERROR, media_log_)
          << "Unknown duration for " << frame->GetTypeName() << " frame at PTS "
          << presentation_timestamp.InMicroseconds() << "us";
      return false;
    }
    if (frame_duration < base::TimeDelta()) {
      MEDIA_LOG(ERROR, media_log_)
          << "Negative duration " << frame_duration.InMicroseconds()
          << "us for " << frame->GetTypeName() << " frame at PTS "
          << presentation_timestamp.InMicroseconds() << "us";
      return false;
    }

    // 4. If mode equals "sequence" and group start timestamp is set, then run
    //    the following steps:
    if (sequence_mode_ && group_start_timestamp_ != kNoTimestamp()) {
      // 4.1. Set timestampOffset equal to group start timestamp -
      //      presentation timestamp.
      *timestamp_offset = group_start_timestamp_ - presentation_timestamp;

      DVLOG(3) << __FUNCTION__ << ": updated timestampOffset is now "
               << timestamp_offset->InSecondsF();

      // 4.2. Set group end timestamp equal to group start timestamp.
      group_end_timestamp_ = group_start_timestamp_;

      // 4.3. Set the need random access point flag on all track buffers to
      //      true.
      SetAllTrackBuffersNeedRandomAccessPoint();

      // 4.4. Unset group start timestamp.
      group_start_timestamp_ = kNoTimestamp();
    }

    // 5. If timestampOffset is not 0, then run the following steps:
    if (*timestamp_offset != base::TimeDelta()) {
      // 5.1. Add timestampOffset to the presentation timestamp.
      // Note: |frame| PTS is only updated if it survives discontinuity
      // processing.
      presentation_timestamp += *timestamp_offset;

      // 5.2. Add timestampOffset to the decode timestamp.
      // Frame DTS is only updated if it survives discontinuity processing.
      decode_timestamp += *timestamp_offset;
    }

    // 6. Let track buffer equal the track buffer that the coded frame will be
    //    added to.

    // Remap audio and video track types to their special singleton
    // identifiers.
    StreamParser::TrackId track_id = kAudioTrackId;
    switch (frame->type()) {
      case DemuxerStream::AUDIO:
        break;
      case DemuxerStream::VIDEO:
        track_id = kVideoTrackId;
        break;
      case DemuxerStream::TEXT:
        track_id = frame->track_id();
        break;
      case DemuxerStream::UNKNOWN:
      case DemuxerStream::NUM_TYPES:
        DCHECK(false) << ": Invalid frame type " << frame->type();
        return false;
    }

    MseTrackBuffer* track_buffer = FindTrack(track_id);
    if (!track_buffer) {
      MEDIA_LOG(ERROR, media_log_)
          << "Unknown track with type " << frame->GetTypeName()
          << ", frame processor track id " << track_id
          << ", and parser track id " << frame->track_id();
      return false;
    }

    // 7. If last decode timestamp for track buffer is set and decode timestamp
    //    is less than last decode timestamp
    //    OR
    //    If last decode timestamp for track buffer is set and the difference
    //    between decode timestamp and last decode timestamp is greater than 2
    //    times last frame duration:
    DecodeTimestamp last_decode_timestamp =
        track_buffer->last_decode_timestamp();
    if (last_decode_timestamp != kNoDecodeTimestamp()) {
      base::TimeDelta dts_delta = decode_timestamp - last_decode_timestamp;
      if (dts_delta < base::TimeDelta() ||
          dts_delta > 2 * track_buffer->last_frame_duration()) {
        // 7.1. If mode equals "segments": Set group end timestamp to
        //      presentation timestamp.
        //      If mode equals "sequence": Set group start timestamp equal to
        //      the group end timestamp.
        if (!sequence_mode_) {
          group_end_timestamp_ = presentation_timestamp;
          // This triggers a discontinuity so we need to treat the next frames
          // appended within the append window as if they were the beginning of
          // a new segment.
          *new_media_segment = true;
        } else {
          DVLOG(3) << __FUNCTION__ << " : Sequence mode discontinuity, GETS: "
                   << group_end_timestamp_.InSecondsF();
          DCHECK(kNoTimestamp() != group_end_timestamp_);
          group_start_timestamp_ = group_end_timestamp_;
        }

        // 7.2. - 7.5.:
        Reset();

        // 7.6. Jump to the Loop Top step above to restart processing of the
        //      current coded frame.
        DVLOG(3) << __FUNCTION__ << ": Discontinuity: reprocessing frame";
        continue;
      }
    }

    // 9. Let frame end timestamp equal the sum of presentation timestamp and
    //    frame duration.
    base::TimeDelta frame_end_timestamp =
        presentation_timestamp + frame_duration;

    // 10. If presentation timestamp is less than appendWindowStart, then set
    //     the need random access point flag to true, drop the coded frame, and
    //     jump to the top of the loop to start processing the next coded
    //     frame.
    // Note: We keep the result of partial discard of a buffer that overlaps
    //     |append_window_start| and does not end after |append_window_end|.
    // 11. If frame end timestamp is greater than appendWindowEnd, then set the
    //     need random access point flag to true, drop the coded frame, and
    //     jump to the top of the loop to start processing the next coded
    //     frame.
    frame->set_timestamp(presentation_timestamp);
    frame->SetDecodeTimestamp(decode_timestamp);
    if (track_buffer->stream()->supports_partial_append_window_trimming() &&
        HandlePartialAppendWindowTrimming(append_window_start,
                                          append_window_end,
                                          frame)) {
      // |frame| has been partially trimmed or had preroll added. Though
      // |frame|'s duration may have changed, do not update |frame_duration|
      // here, so |track_buffer|'s last frame duration update uses the original
      // frame duration and reduces spurious discontinuity detection.
      decode_timestamp = frame->GetDecodeTimestamp();
      presentation_timestamp = frame->timestamp();
      frame_end_timestamp = frame->timestamp() + frame->duration();
    }

    if (presentation_timestamp < append_window_start ||
        frame_end_timestamp > append_window_end) {
      track_buffer->set_needs_random_access_point(true);
      DVLOG(3) << "Dropping frame that is outside append window.";
      return true;
    }

    // Note: This step is relocated, versus April 1 spec, to allow append
    // window processing to first filter coded frames shifted by
    // |timestamp_offset_| in such a way that their PTS is negative.
    // 8. If the presentation timestamp or decode timestamp is less than the
    //    presentation start time, then run the end of stream algorithm with
    //    the error parameter set to "decode", and abort these steps.
    DCHECK(presentation_timestamp >= base::TimeDelta());
    if (decode_timestamp < DecodeTimestamp()) {
      // B-frames may still result in negative DTS here after being shifted by
      // |timestamp_offset_|.
      MEDIA_LOG(ERROR, media_log_)
          << frame->GetTypeName() << " frame with PTS "
          << presentation_timestamp.InMicroseconds() << "us has negative DTS "
          << decode_timestamp.InMicroseconds()
          << "us after applying timestampOffset, handling any discontinuity, "
             "and filtering against append window";
      return false;
    }

    // 12. If the need random access point flag on track buffer equals true,
    //     then run the following steps:
    if (track_buffer->needs_random_access_point()) {
      // 12.1. If the coded frame is not a random access point, then drop the
      //       coded frame and jump to the top of the loop to start processing
      //       the next coded frame.
      if (!frame->is_key_frame()) {
        DVLOG(3) << __FUNCTION__
                 << ": Dropping frame that is not a random access point";
        return true;
      }

      // 12.2. Set the need random access point flag on track buffer to false.
      track_buffer->set_needs_random_access_point(false);
    }

    // We now have a processed buffer to append to the track buffer's stream.
    // If it is the first in a new media segment or following a discontinuity,
    // notify all the track buffers' streams that a new segment is beginning.
    if (*new_media_segment) {
      // First, complete the append to track buffer streams of the previous
      // media segment's frames, if any.
      if (!FlushProcessedFrames())
        return false;

      *new_media_segment = false;

      // TODO(acolwell/wolenetz): This should be changed to a presentation
      // timestamp. See http://crbug.com/402502
      NotifyNewMediaSegmentStarting(decode_timestamp);
    }

    DVLOG(3) << __FUNCTION__ << ": Sending processed frame to stream, "
             << "PTS=" << presentation_timestamp.InSecondsF()
             << ", DTS=" << decode_timestamp.InSecondsF();

    // Steps 13-18: Note, we optimize by appending groups of contiguous
    // processed frames for each track buffer at the end of ProcessFrames() or
    // prior to NotifyNewMediaSegmentStarting().
    // TODO(wolenetz): Refactor SourceBufferStream to conform to spec GC timing.
    // See http://crbug.com/371197.
    track_buffer->EnqueueProcessedFrame(frame);

    // 19. Set last decode timestamp for track buffer to decode timestamp.
    track_buffer->set_last_decode_timestamp(decode_timestamp);

    // 20. Set last frame duration for track buffer to frame duration.
    track_buffer->set_last_frame_duration(frame_duration);

    // 21. If highest presentation timestamp for track buffer is unset or frame
    //     end timestamp is greater than highest presentation timestamp, then
    //     set highest presentation timestamp for track buffer to frame end
    //     timestamp.
    track_buffer->SetHighestPresentationTimestampIfIncreased(
        frame_end_timestamp);

    // 22. If frame end timestamp is greater than group end timestamp, then set
    //     group end timestamp equal to frame end timestamp.
    if (frame_end_timestamp > group_end_timestamp_)
      group_end_timestamp_ = frame_end_timestamp;
    DCHECK(group_end_timestamp_ >= base::TimeDelta());

    return true;
  }

  NOTREACHED();
  return false;
}

}  // namespace media