/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "MediaDecoderStateMachine.h"

#include "AudioSegment.h"
#include "DOMMediaStream.h"
#include "ImageContainer.h"
#include "MediaDecoder.h"
#include "MediaShutdownManager.h"
#include "MediaTimer.h"
#include "MediaTrackGraph.h"
#include "PerformanceRecorder.h"
#include "ReaderProxy.h"
#include "TimeUnits.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
#include "mediasink/AudioSink.h"
#include "mediasink/AudioSinkWrapper.h"
#include "mediasink/DecodedStream.h"
#include "mediasink/VideoSink.h"
#include "mozilla/Logging.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/NotNull.h"
#include "mozilla/Preferences.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/ProfilerMarkerTypes.h"
#include "mozilla/ProfilerMarkers.h"
#include "mozilla/SharedThreadPool.h"
#include "mozilla/Sprintf.h"
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/Telemetry.h"
#include "nsIMemoryReporter.h"
#include "nsPrintfCString.h"
using namespace mozilla::media;

#define NS_DispatchToMainThread(...) \
  CompileError_UseAbstractThreadDispatchInstead

// avoid redefined macro in unified build
#define FMT(x, ...) "Decoder=%p " x, mDecoderID, ##__VA_ARGS__
#define LOG(x, ...)                                                         \
  DDMOZ_LOG(gMediaDecoderLog, LogLevel::Debug, "Decoder=%p " x, mDecoderID, \
            ##__VA_ARGS__)
#define LOGV(x, ...)                                                          \
  DDMOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, "Decoder=%p " x, mDecoderID, \
            ##__VA_ARGS__)
#define LOGW(x, ...) NS_WARNING(nsPrintfCString(FMT(x, ##__VA_ARGS__)).get())
#define LOGE(x, ...)                                                   \
  NS_DebugBreak(NS_DEBUG_WARNING,                                      \
                nsPrintfCString(FMT(x, ##__VA_ARGS__)).get(), nullptr, \
                __FILE__, __LINE__)

// Used by StateObject and its sub-classes
#define SFMT(x, ...)                                                     \
  "Decoder=%p state=%s " x, mMaster->mDecoderID, ToStateStr(GetState()), \
      ##__VA_ARGS__
#define SLOG(x, ...)                                                     \
  DDMOZ_LOGEX(mMaster, gMediaDecoderLog, LogLevel::Debug, "state=%s " x, \
              ToStateStr(GetState()), ##__VA_ARGS__)
#define SLOGW(x, ...) NS_WARNING(nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get())
#define SLOGE(x, ...)                                                   \
  NS_DebugBreak(NS_DEBUG_WARNING,                                       \
                nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get(), nullptr, \
                __FILE__, __LINE__)
// Certain constants get stored as member variables and then adjusted by
// various scale factors on a per-decoder basis. We want to make sure to avoid
// using these constants directly, so we put them in a namespace.
namespace detail {

// Resume a suspended video decoder to the current playback position plus this
// time premium for compensating the seeking delay.
static constexpr auto RESUME_VIDEO_PREMIUM = TimeUnit::FromMicroseconds(125000);

static const int64_t AMPLE_AUDIO_USECS = 2000000;

// If more than this much decoded audio is queued, we'll hold off
// decoding more audio.
static constexpr auto AMPLE_AUDIO_THRESHOLD =
    TimeUnit::FromMicroseconds(AMPLE_AUDIO_USECS);

} // namespace detail
// If we have fewer than LOW_VIDEO_FRAMES decoded frames, and
// we're not "prerolling video", we'll skip the video up to the next keyframe
// which is at or after the current playback position.
static const uint32_t LOW_VIDEO_FRAMES = 2;

// Arbitrary "frame duration" when playing only audio.
static const uint32_t AUDIO_DURATION_USECS = 40000;

namespace detail {

// If we have less than this much buffered data available, we'll consider
// ourselves to be running low on buffered data. We determine how much
// buffered data we have remaining using the reader's GetBuffered()
// implementation.
static const int64_t LOW_BUFFER_THRESHOLD_USECS = 5000000;

static constexpr auto LOW_BUFFER_THRESHOLD =
    TimeUnit::FromMicroseconds(LOW_BUFFER_THRESHOLD_USECS);

// LOW_BUFFER_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS,
// otherwise the skip-to-keyframe logic can activate when we're running low on
// data.
static_assert(LOW_BUFFER_THRESHOLD_USECS > AMPLE_AUDIO_USECS,
              "LOW_BUFFER_THRESHOLD_USECS is too small");

} // namespace detail
// Amount of excess data to add in to the "should we buffer" calculation.
static constexpr auto EXHAUSTED_DATA_MARGIN =
    TimeUnit::FromMicroseconds(100000);

static const uint32_t MIN_VIDEO_QUEUE_SIZE = 3;
static const uint32_t MAX_VIDEO_QUEUE_SIZE = 10;
#ifdef MOZ_APPLEMEDIA
static const uint32_t HW_VIDEO_QUEUE_SIZE = 10;
#else
static const uint32_t HW_VIDEO_QUEUE_SIZE = 3;
#endif
static const uint32_t VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE = 9999;

static uint32_t sVideoQueueDefaultSize = MAX_VIDEO_QUEUE_SIZE;
static uint32_t sVideoQueueHWAccelSize = HW_VIDEO_QUEUE_SIZE;
static uint32_t sVideoQueueSendToCompositorSize =
    VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE;
static void InitVideoQueuePrefs() {
  MOZ_ASSERT(NS_IsMainThread());
  static bool sPrefInit = false;
  if (!sPrefInit) {
    sPrefInit = true;
    sVideoQueueDefaultSize = Preferences::GetUint(
        "media.video-queue.default-size", MAX_VIDEO_QUEUE_SIZE);
    sVideoQueueHWAccelSize = Preferences::GetUint(
        "media.video-queue.hw-accel-size", HW_VIDEO_QUEUE_SIZE);
    sVideoQueueSendToCompositorSize =
        Preferences::GetUint("media.video-queue.send-to-compositor-size",
                             VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE);
  }
}
template <typename Type, typename Function>
static void DiscardFramesFromTail(MediaQueue<Type>& aQueue,
                                  const Function&& aTest) {
  while (aQueue.GetSize()) {
    if (aTest(aQueue.PeekBack()->mTime.ToMicroseconds())) {
      RefPtr<Type> releaseMe = aQueue.PopBack();
      continue;
    }
    break;
  }
}
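// Sketch of how DiscardFramesFromTail() is typically invoked (see
// DiscardLoopedData() further below); the lambda and the 10-second cutoff here
// are illustrative only:
//
//   DiscardFramesFromTail(VideoQueue(), [&](int64_t aSampleTime) {
//     return aSampleTime > TimeUnit::FromSeconds(10).ToMicroseconds();
//   });
//
// Frames are popped from the back of the queue for as long as the predicate
// returns true, and the loop stops at the first frame that should be kept.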
// Delay, in milliseconds, that a tab needs to be in the background before
// video decoding is suspended.
static TimeDuration SuspendBackgroundVideoDelay() {
  return TimeDuration::FromMilliseconds(
      StaticPrefs::media_suspend_background_video_delay_ms());
}
class MediaDecoderStateMachine::StateObject {
 public:
  virtual ~StateObject() = default;
  virtual void Exit() {}  // Exit action.
  virtual void Step() {}  // Perform a 'cycle' of this state object.
  virtual State GetState() const = 0;

  // Event handlers for various events.
  virtual void HandleAudioCaptured() {}
  virtual void HandleAudioDecoded(AudioData* aAudio) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleVideoDecoded(VideoData* aVideo) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleAudioWaited(MediaData::Type aType) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleVideoWaited(MediaData::Type aType) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleWaitingForAudio() { Crash("Unexpected event!", __func__); }
  virtual void HandleAudioCanceled() { Crash("Unexpected event!", __func__); }
  virtual void HandleEndOfAudio() { Crash("Unexpected event!", __func__); }
  virtual void HandleWaitingForVideo() { Crash("Unexpected event!", __func__); }
  virtual void HandleVideoCanceled() { Crash("Unexpected event!", __func__); }
  virtual void HandleEndOfVideo() { Crash("Unexpected event!", __func__); }

  virtual RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget);

  virtual RefPtr<ShutdownPromise> HandleShutdown();

  virtual void HandleVideoSuspendTimeout() = 0;

  virtual void HandleResumeVideoDecoding(const TimeUnit& aTarget);

  virtual void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) {}

  virtual void GetDebugInfo(
      dom::MediaDecoderStateMachineDecodingStateDebugInfo& aInfo) {}

  virtual void HandleLoopingChanged() {}

 private:
  template <class S, typename R, typename... As>
  auto ReturnTypeHelper(R (S::*)(As...)) -> R;

  void Crash(const char* aReason, const char* aSite) {
    char buf[1024];
    SprintfLiteral(buf, "%s state=%s callsite=%s", aReason,
                   ToStateStr(GetState()), aSite);
    MOZ_ReportAssertionFailure(buf, __FILE__, __LINE__);
    MOZ_CRASH();
  }

 protected:
  enum class EventVisibility : int8_t { Observable, Suppressed };
  using Master = MediaDecoderStateMachine;

  explicit StateObject(Master* aPtr) : mMaster(aPtr) {}

  TaskQueue* OwnerThread() const { return mMaster->mTaskQueue; }
  ReaderProxy* Reader() const { return mMaster->mReader; }
  const MediaInfo& Info() const { return mMaster->Info(); }
  MediaQueue<AudioData>& AudioQueue() const { return mMaster->mAudioQueue; }
  MediaQueue<VideoData>& VideoQueue() const { return mMaster->mVideoQueue; }

  template <class S, typename... Args, size_t... Indexes>
  auto CallEnterMemberFunction(S* aS, std::tuple<Args...>& aTuple,
                               std::index_sequence<Indexes...>)
      -> decltype(ReturnTypeHelper(&S::Enter)) {
    AUTO_PROFILER_LABEL("StateObject::CallEnterMemberFunction", MEDIA_PLAYBACK);
    return aS->Enter(std::move(std::get<Indexes>(aTuple))...);
  }
  // Note this function will delete the current state object.
  // Don't access members to avoid UAF after this call.
  template <class S, typename... Ts>
  auto SetState(Ts&&... aArgs) -> decltype(ReturnTypeHelper(&S::Enter)) {
    // |aArgs| must be passed by reference to avoid passing MOZ_NON_PARAM class
    // SeekJob by value. See bug 1287006 and bug 1338374. But we still *must*
    // copy the parameters, because |Exit()| can modify them. See bug 1312321.
    // So we 1) pass the parameters by reference, but then 2) immediately copy
    // them into a Tuple to be safe against modification, and finally 3) move
    // the elements of the Tuple into the final function call.
    auto copiedArgs = std::make_tuple(std::forward<Ts>(aArgs)...);

    // Copy mMaster, which will be reset to null below.
    auto* master = mMaster;

    auto* s = new S(master);

    // It's possible to seek again during seeking, otherwise the new state
    // should always be different from the original one.
    MOZ_ASSERT(GetState() != s->GetState() ||
               GetState() == DECODER_STATE_SEEKING_ACCURATE ||
               GetState() == DECODER_STATE_SEEKING_FROMDORMANT ||
               GetState() == DECODER_STATE_SEEKING_NEXTFRAMESEEKING ||
               GetState() == DECODER_STATE_SEEKING_VIDEOONLY);

    SLOG("change state to: %s", ToStateStr(s->GetState()));
    PROFILER_MARKER_TEXT("MDSM::StateChange", MEDIA_PLAYBACK, {},
                         nsPrintfCString("%s", ToStateStr(s->GetState())));

    Exit();

    // Delete the old state asynchronously to avoid UAF if the caller tries to
    // access its members after SetState() returns.
    master->OwnerThread()->DispatchDirectTask(
        NS_NewRunnableFunction("MDSM::StateObject::DeleteOldState",
                               [toDelete = std::move(master->mStateObj)]() {}));
    // Also reset mMaster to catch potential UAF.
    mMaster = nullptr;

    master->mStateObj.reset(s);
    return CallEnterMemberFunction(s, copiedArgs,
                                   std::index_sequence_for<Ts...>{});
  }
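  // Illustrative only: the states below express a transition as a call such as
  //
  //   SetState<DormantState>();
  //
  // after which the calling state object has been scheduled for deletion and
  // must not touch its own members again.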
  RefPtr<MediaDecoder::SeekPromise> SetSeekingState(
      SeekJob&& aSeekJob, EventVisibility aVisibility);

  void SetDecodingState();

  // Take a raw pointer in order not to change the life cycle of MDSM.
  // It is guaranteed to be valid by MDSM.
  Master* mMaster;
};
/**
 * Purpose: decode metadata like duration and dimensions of the media resource.
 *
 * Transition to other states when decoding metadata is done:
 *   SHUTDOWN if failing to decode metadata.
 *   DECODING_FIRSTFRAME otherwise.
 */
class MediaDecoderStateMachine::DecodeMetadataState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) {}

  void Enter() {
    MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
    MOZ_ASSERT(!mMetadataRequest.Exists());
    SLOG("Dispatching AsyncReadMetadata");

    // We disconnect mMetadataRequest in Exit() so it is fine to capture
    // a raw pointer here.
    Reader()
        ->ReadMetadata()
        ->Then(
            OwnerThread(), __func__,
            [this](MetadataHolder&& aMetadata) {
              OnMetadataRead(std::move(aMetadata));
            },
            [this](const MediaResult& aError) { OnMetadataNotRead(aError); })
        ->Track(mMetadataRequest);
  }
  void Exit() override { mMetadataRequest.DisconnectIfExists(); }

  State GetState() const override { return DECODER_STATE_DECODING_METADATA; }

  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override {
    MOZ_DIAGNOSTIC_CRASH("Can't seek while decoding metadata.");
    return MediaDecoder::SeekPromise::CreateAndReject(true, __func__);
  }

  void HandleVideoSuspendTimeout() override {
    // Do nothing since no decoders are created yet.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // We never suspend video decoding in this state.
    MOZ_ASSERT(false, "Shouldn't have suspended video decoding.");
  }

 private:
  void OnMetadataRead(MetadataHolder&& aMetadata);

  void OnMetadataNotRead(const MediaResult& aError) {
    AUTO_PROFILER_LABEL("DecodeMetadataState::OnMetadataNotRead",
                        MEDIA_PLAYBACK);
    mMetadataRequest.Complete();
    SLOGE("Decode metadata failed, shutting down decoder");
    mMaster->DecodeError(aError);
  }

  MozPromiseRequestHolder<MediaFormatReader::MetadataPromise> mMetadataRequest;
};
/**
 * Purpose: release decoder resources to save memory and hardware resources.
 *
 * Transition to:
 *   SEEKING if any seek request or play state changes to PLAYING.
 */
class MediaDecoderStateMachine::DormantState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit DormantState(Master* aPtr) : StateObject(aPtr) {}

  void Enter() {
    if (mMaster->IsPlaying()) {
      mMaster->StopPlayback();
    }

    // Calculate the position to seek to when exiting dormant.
    auto t = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock()
                                              : mMaster->GetMediaTime();
    mMaster->AdjustByLooping(t);
    mPendingSeek.mTarget.emplace(t, SeekTarget::Accurate);
    // SeekJob asserts |mTarget.IsValid() == !mPromise.IsEmpty()| so we
    // need to create the promise even if it is not used at all.
    // The promise may be used when coming out of DormantState into
    // the seeking state.
    RefPtr<MediaDecoder::SeekPromise> x =
        mPendingSeek.mPromise.Ensure(__func__);

    // Reset the decoding state to ensure that any queued video frames are
    // released and don't consume video memory.
    mMaster->ResetDecode();

    // No need to call StopMediaSink() here.
    // We will do it during seeking when exiting dormant.

    // Ignore WAIT_FOR_DATA since we won't decode in dormant.
    mMaster->mAudioWaitRequest.DisconnectIfExists();
    mMaster->mVideoWaitRequest.DisconnectIfExists();

    MaybeReleaseResources();
  }

  void Exit() override {
    // mPendingSeek is either moved when exiting dormant or
    // should be rejected here before transition to SHUTDOWN.
    mPendingSeek.RejectIfExists(__func__);
  }

  State GetState() const override { return DECODER_STATE_DORMANT; }
  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override;

  void HandleVideoSuspendTimeout() override {
    // Do nothing since we've released decoders in Enter().
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // Do nothing since we won't resume decoding until exiting dormant.
  }

  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override;

  void HandleAudioDecoded(AudioData*) override { MaybeReleaseResources(); }
  void HandleVideoDecoded(VideoData*) override { MaybeReleaseResources(); }
  void HandleWaitingForAudio() override { MaybeReleaseResources(); }
  void HandleWaitingForVideo() override { MaybeReleaseResources(); }
  void HandleAudioCanceled() override { MaybeReleaseResources(); }
  void HandleVideoCanceled() override { MaybeReleaseResources(); }
  void HandleEndOfAudio() override { MaybeReleaseResources(); }
  void HandleEndOfVideo() override { MaybeReleaseResources(); }

 private:
  void MaybeReleaseResources() {
    if (!mMaster->mAudioDataRequest.Exists() &&
        !mMaster->mVideoDataRequest.Exists()) {
      // Release decoders only when they are idle. Otherwise it might cause
      // decode error later when resetting decoders during seeking.
      mMaster->mReader->ReleaseResources();
    }
  }

  SeekJob mPendingSeek;
};
/**
 * Purpose: decode the 1st audio and video frames to fire the 'loadeddata'
 * event.
 *
 * Transition to:
 *   SHUTDOWN if any decode error.
 *   SEEKING if any seek request.
 *   DECODING/LOOPING_DECODING when the 'loadeddata' event is fired.
 */
class MediaDecoderStateMachine::DecodingFirstFrameState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) {}

  void Enter();

  void Exit() override {
    // mPendingSeek is either moved in MaybeFinishDecodeFirstFrame()
    // or should be rejected here before transition to SHUTDOWN.
    mPendingSeek.RejectIfExists(__func__);
  }

  State GetState() const override { return DECODER_STATE_DECODING_FIRSTFRAME; }

  void HandleAudioDecoded(AudioData* aAudio) override {
    mMaster->PushAudio(aAudio);
    MaybeFinishDecodeFirstFrame();
  }

  void HandleVideoDecoded(VideoData* aVideo) override {
    mMaster->PushVideo(aVideo);
    MaybeFinishDecodeFirstFrame();
  }

  void HandleWaitingForAudio() override {
    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
  }

  void HandleAudioCanceled() override { mMaster->RequestAudioData(); }

  void HandleEndOfAudio() override {
    AudioQueue().Finish();
    MaybeFinishDecodeFirstFrame();
  }

  void HandleWaitingForVideo() override {
    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
  }

  void HandleVideoCanceled() override {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleEndOfVideo() override {
    VideoQueue().Finish();
    MaybeFinishDecodeFirstFrame();
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    mMaster->RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleVideoSuspendTimeout() override {
    // Do nothing since we need to decode the 1st video frame to get the
    // dimensions.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // We never suspend video decoding in this state.
    MOZ_ASSERT(false, "Shouldn't have suspended video decoding.");
  }

  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override {
    if (mMaster->mIsMSE) {
      return StateObject::HandleSeek(aTarget);
    }
    // Delay seek request until decoding first frames for non-MSE media.
    SLOG("Not enough data to seek at this stage, queuing seek");
    mPendingSeek.RejectIfExists(__func__);
    mPendingSeek.mTarget.emplace(aTarget);
    return mPendingSeek.mPromise.Ensure(__func__);
  }

 private:
  // Notify FirstFrameLoaded if having decoded first frames and
  // transition to SEEKING if there is any pending seek, or DECODING otherwise.
  void MaybeFinishDecodeFirstFrame();

  SeekJob mPendingSeek;
};
/**
 * Purpose: decode audio/video data for playback.
 *
 * Transition to:
 *   DORMANT if playback is paused for a while.
 *   SEEKING if any seek request.
 *   SHUTDOWN if any decode error.
 *   BUFFERING if playback can't continue due to lack of decoded data.
 *   COMPLETED when having decoded all audio/video data.
 *   LOOPING_DECODING when media starts seamless looping.
 */
class MediaDecoderStateMachine::DecodingState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit DecodingState(Master* aPtr)
      : StateObject(aPtr), mDormantTimer(OwnerThread()) {}

  void Enter();

  void Exit() override {
    if (!mDecodeStartTime.IsNull()) {
      TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime;
      SLOG("Exiting DECODING, decoded for %.3lfs", decodeDuration.ToSeconds());
    }
    mDormantTimer.Reset();
    mOnAudioPopped.DisconnectIfExists();
    mOnVideoPopped.DisconnectIfExists();
  }

  void Step() override;

  State GetState() const override { return DECODER_STATE_DECODING; }
  void HandleAudioDecoded(AudioData* aAudio) override {
    mMaster->PushAudio(aAudio);
    DispatchDecodeTasksIfNeeded();
    MaybeStopPrerolling();
  }

  void HandleVideoDecoded(VideoData* aVideo) override {
    // We only do this check when we're not looping, which can be known by
    // checking the queue's offset.
    const auto currentTime = mMaster->GetMediaTime();
    if (aVideo->GetEndTime() < currentTime &&
        VideoQueue().GetOffset() == media::TimeUnit::Zero()) {
      if (!mVideoFirstLateTime) {
        mVideoFirstLateTime = Some(TimeStamp::Now());
      }
      PROFILER_MARKER("Video falling behind", MEDIA_PLAYBACK, {},
                      VideoFallingBehindMarker, aVideo->mTime.ToMicroseconds(),
                      currentTime.ToMicroseconds());
      SLOG("video %" PRId64 " starts being late (current=%" PRId64 ")",
           aVideo->mTime.ToMicroseconds(), currentTime.ToMicroseconds());
    } else {
      mVideoFirstLateTime.reset();
    }
    mMaster->PushVideo(aVideo);
    DispatchDecodeTasksIfNeeded();
    MaybeStopPrerolling();
  }
  void HandleAudioCanceled() override { mMaster->RequestAudioData(); }

  void HandleVideoCanceled() override {
    mMaster->RequestVideoData(mMaster->GetMediaTime(),
                              ShouldRequestNextKeyFrame());
  }

  void HandleEndOfAudio() override;
  void HandleEndOfVideo() override;

  void HandleWaitingForAudio() override {
    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
    MaybeStopPrerolling();
  }

  void HandleWaitingForVideo() override {
    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
    MaybeStopPrerolling();
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    mMaster->RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    mMaster->RequestVideoData(mMaster->GetMediaTime(),
                              ShouldRequestNextKeyFrame());
  }
  void HandleAudioCaptured() override {
    MaybeStopPrerolling();
    // MediaSink is changed. Schedule Step() to check if we can start playback.
    mMaster->ScheduleStateMachine();
  }

  void HandleVideoSuspendTimeout() override {
    // No video, so nothing to suspend.
    if (!mMaster->HasVideo()) {
      return;
    }

    PROFILER_MARKER_UNTYPED("MDSM::EnterVideoSuspend", MEDIA_PLAYBACK);
    mMaster->mVideoDecodeSuspended = true;
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend);
    Reader()->SetVideoBlankDecode(true);
  }

  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override {
    // Schedule Step() to check if we can start or stop playback.
    mMaster->ScheduleStateMachine();
    if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
      // Try to dispatch decoding tasks, since mMinimizePreroll might have been
      // reset.
      DispatchDecodeTasksIfNeeded();
    }

    if (aPlayState == MediaDecoder::PLAY_STATE_PAUSED) {
      StartDormantTimer();
      mVideoFirstLateTime.reset();
    } else {
      mDormantTimer.Reset();
    }
  }

  void GetDebugInfo(
      dom::MediaDecoderStateMachineDecodingStateDebugInfo& aInfo) override {
    aInfo.mIsPrerolling = mIsPrerolling;
  }

  void HandleLoopingChanged() override { SetDecodingState(); }
 protected:
  virtual void EnsureAudioDecodeTaskQueued();
  virtual void EnsureVideoDecodeTaskQueued();

  virtual bool ShouldStopPrerolling() const {
    return mIsPrerolling &&
           (DonePrerollingAudio() ||
            IsWaitingData(MediaData::Type::AUDIO_DATA)) &&
           (DonePrerollingVideo() ||
            IsWaitingData(MediaData::Type::VIDEO_DATA));
  }
  virtual bool IsWaitingData(MediaData::Type aType) const {
    if (aType == MediaData::Type::AUDIO_DATA) {
      return mMaster->IsWaitingAudioData();
    }
    MOZ_ASSERT(aType == MediaData::Type::VIDEO_DATA);
    return mMaster->IsWaitingVideoData();
  }

  void MaybeStopPrerolling() {
    if (ShouldStopPrerolling()) {
      mIsPrerolling = false;
      // Check if we can start playback.
      mMaster->ScheduleStateMachine();
    }
  }
  bool ShouldRequestNextKeyFrame() const {
    if (!mVideoFirstLateTime) {
      return false;
    }
    const double elapsedTimeMs =
        (TimeStamp::Now() - *mVideoFirstLateTime).ToMilliseconds();
    const bool rv = elapsedTimeMs >=
                    StaticPrefs::media_decoder_skip_when_video_too_slow_ms();
    if (rv) {
      PROFILER_MARKER_UNTYPED("Skipping to next keyframe", MEDIA_PLAYBACK);
      SLOG(
          "video has been late behind media time for %f ms, should skip to "
          "next keyframe",
          elapsedTimeMs);
    }
    return rv;
  }

  virtual bool IsBufferingAllowed() const { return true; }
 private:
  void DispatchDecodeTasksIfNeeded();
  void MaybeStartBuffering();

  // At the start of decoding we want to "preroll" the decode until we've
  // got a few frames decoded before we consider whether decode is falling
  // behind. Otherwise our "we're falling behind" logic will trigger
  // unnecessarily if we start playing as soon as the first sample is
  // decoded. These two values determine how many video frames and audio
  // samples we must consume before we are considered to have finished
  // prerolling.
  TimeUnit AudioPrerollThreshold() const {
    return (mMaster->mAmpleAudioThreshold / 2)
        .MultDouble(mMaster->mPlaybackRate);
  }

  uint32_t VideoPrerollFrames() const {
    return std::min(
        static_cast<uint32_t>(
            mMaster->GetAmpleVideoFrames() / 2. * mMaster->mPlaybackRate + 1),
        sVideoQueueDefaultSize);
  }

  bool DonePrerollingAudio() const {
    return !mMaster->IsAudioDecoding() ||
           mMaster->GetDecodedAudioDuration() >= AudioPrerollThreshold();
  }

  bool DonePrerollingVideo() const {
    return !mMaster->IsVideoDecoding() ||
           static_cast<uint32_t>(mMaster->VideoQueue().GetSize()) >=
               VideoPrerollFrames();
  }
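  // Illustrative arithmetic only (assuming the default constants above, not a
  // guarantee): with mAmpleAudioThreshold equal to detail::AMPLE_AUDIO_THRESHOLD
  // (2s) and a playback rate of 1.0, audio prerolling finishes once ~1s of
  // audio is decoded; with GetAmpleVideoFrames() == 10, video prerolling
  // finishes once 10 / 2 * 1.0 + 1 = 6 frames are queued (capped at
  // sVideoQueueDefaultSize).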
  void StartDormantTimer() {
    if (!mMaster->mMediaSeekable) {
      // Don't enter dormant if the media is not seekable because we need to
      // seek when exiting dormant.
      return;
    }

    auto timeout = StaticPrefs::media_dormant_on_pause_timeout_ms();
    if (timeout < 0) {
      // Disabled when timeout is negative.
      return;
    }

    if (timeout == 0) {
      // Enter dormant immediately without scheduling a timer.
      SetState<DormantState>();
      return;
    }

    if (mMaster->mMinimizePreroll) {
      SetState<DormantState>();
      return;
    }

    TimeStamp target =
        TimeStamp::Now() + TimeDuration::FromMilliseconds(timeout);

    mDormantTimer.Ensure(
        target,
        [this]() {
          AUTO_PROFILER_LABEL("DecodingState::StartDormantTimer:SetDormant",
                              MEDIA_PLAYBACK);
          mDormantTimer.CompleteRequest();
          SetState<DormantState>();
        },
        [this]() { mDormantTimer.CompleteRequest(); });
  }
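  // For illustration only: with the pref semantics described above, pausing a
  // seekable medium behaves roughly as follows:
  // media.dormant-on-pause-timeout-ms = 5000 schedules DormantState five
  // seconds after the pause, 0 enters DormantState immediately, and a negative
  // value disables dormant-on-pause entirely.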
  // Time at which we started decoding.
  TimeStamp mDecodeStartTime;

  // When we start decoding (either for the first time, or after a pause)
  // we may be low on decoded data. We don't want our "low data" logic to
  // kick in and decide that we're low on decoded data because the download
  // can't keep up with the decode, and cause us to pause playback. So we
  // have a "preroll" stage, where we ignore the results of our "low data"
  // logic during the first few frames of our decode. This occurs during
  // playback.
  bool mIsPrerolling = true;

  // Fired when playback is paused for a while to enter dormant.
  DelayedScheduler<TimeStamp> mDormantTimer;

  MediaEventListener mOnAudioPopped;
  MediaEventListener mOnVideoPopped;

  // If video has been late relative to the media time, this records when the
  // video started being late. It will be reset once video catches up with the
  // media time.
  Maybe<TimeStamp> mVideoFirstLateTime;
};
/**
 * Purpose: decode audio data for playback when media is in seamless
 * looping; we will adjust the media time to make sample times monotonically
 * increasing. All its methods run on its owner thread (the MDSM thread).
 *
 * Transition to:
 *   DORMANT if playback is paused for a while.
 *   SEEKING if any seek request.
 *   SHUTDOWN if any decode error.
 *   BUFFERING if playback can't continue due to lack of decoded data.
 *   COMPLETED when the media resource is closed and no data is available
 *             anymore.
 *   DECODING when media stops seamless looping.
 */
class MediaDecoderStateMachine::LoopingDecodingState
    : public MediaDecoderStateMachine::DecodingState {
 public:
  explicit LoopingDecodingState(Master* aPtr)
      : DecodingState(aPtr),
        mIsReachingAudioEOS(!mMaster->IsAudioDecoding()),
        mIsReachingVideoEOS(!mMaster->IsVideoDecoding()),
        mAudioEndedBeforeEnteringStateWithoutDuration(false),
        mVideoEndedBeforeEnteringStateWithoutDuration(false) {
    MOZ_ASSERT(mMaster->mLooping);
    SLOG(
        "LoopingDecodingState ctor, mIsReachingAudioEOS=%d, "
        "mIsReachingVideoEOS=%d",
        mIsReachingAudioEOS, mIsReachingVideoEOS);
    // If the track has reached EOS and we already have its last data, then we
    // can know its duration. But if playback starts from EOS (due to seeking),
    // the decoded end time would be zero because none of the data has been
    // decoded yet.
    if (mIsReachingAudioEOS) {
      if (mMaster->HasLastDecodedData(MediaData::Type::AUDIO_DATA) &&
          !mMaster->mAudioTrackDecodedDuration) {
        mMaster->mAudioTrackDecodedDuration.emplace(
            mMaster->mDecodedAudioEndTime);
        SLOG("determine mAudioTrackDecodedDuration");
      } else {
        mAudioEndedBeforeEnteringStateWithoutDuration = true;
        SLOG("still don't know mAudioTrackDecodedDuration");
      }
    }

    if (mIsReachingVideoEOS) {
      if (mMaster->HasLastDecodedData(MediaData::Type::VIDEO_DATA) &&
          !mMaster->mVideoTrackDecodedDuration) {
        mMaster->mVideoTrackDecodedDuration.emplace(
            mMaster->mDecodedVideoEndTime);
        SLOG("determine mVideoTrackDecodedDuration");
      } else {
        mVideoEndedBeforeEnteringStateWithoutDuration = true;
        SLOG("still don't know mVideoTrackDecodedDuration");
      }
    }

    // We might be able to determine the duration already, let's check.
    if (mIsReachingAudioEOS || mIsReachingVideoEOS) {
      Unused << DetermineOriginalDecodedDurationIfNeeded();
    }

    // If we've looped at least once before, then we need to update the queue
    // offset correctly to make the media data time and the clock time
    // consistent. Otherwise, it would cause a/v desync.
    if (mMaster->mOriginalDecodedDuration != media::TimeUnit::Zero()) {
      if (mIsReachingAudioEOS && mMaster->HasAudio()) {
        AudioQueue().SetOffset(AudioQueue().GetOffset() +
                               mMaster->mOriginalDecodedDuration);
      }
      if (mIsReachingVideoEOS && mMaster->HasVideo()) {
        VideoQueue().SetOffset(VideoQueue().GetOffset() +
                               mMaster->mOriginalDecodedDuration);
      }
    }
  }

  void Enter() {
    if (mMaster->HasAudio() && mIsReachingAudioEOS) {
      SLOG("audio has ended, request the data again.");
      RequestDataFromStartPosition(TrackInfo::TrackType::kAudioTrack);
    }
    if (mMaster->HasVideo() && mIsReachingVideoEOS) {
      SLOG("video has ended, request the data again.");
      RequestDataFromStartPosition(TrackInfo::TrackType::kVideoTrack);
    }
    DecodingState::Enter();
  }
  void Exit() override {
    MOZ_DIAGNOSTIC_ASSERT(mMaster->OnTaskQueue());
    SLOG("Leaving looping state, offset [a=%" PRId64 ",v=%" PRId64
         "], endtime [a=%" PRId64 ",v=%" PRId64 "], track duration [a=%" PRId64
         ",v=%" PRId64 "], waiting=%s",
         AudioQueue().GetOffset().ToMicroseconds(),
         VideoQueue().GetOffset().ToMicroseconds(),
         mMaster->mDecodedAudioEndTime.ToMicroseconds(),
         mMaster->mDecodedVideoEndTime.ToMicroseconds(),
         mMaster->mAudioTrackDecodedDuration
             ? mMaster->mAudioTrackDecodedDuration->ToMicroseconds()
             : 0,
         mMaster->mVideoTrackDecodedDuration
             ? mMaster->mVideoTrackDecodedDuration->ToMicroseconds()
             : 0,
         mDataWaitingTimestampAdjustment
             ? MediaData::EnumValueToString(
                   mDataWaitingTimestampAdjustment->mType)
             : "none");
    if (ShouldDiscardLoopedData(MediaData::Type::AUDIO_DATA)) {
      DiscardLoopedData(MediaData::Type::AUDIO_DATA);
    }
    if (ShouldDiscardLoopedData(MediaData::Type::VIDEO_DATA)) {
      DiscardLoopedData(MediaData::Type::VIDEO_DATA);
    }

    if (mMaster->HasAudio() && HasDecodedLastAudioFrame()) {
      SLOG("Mark audio queue as finished");
      mMaster->mAudioDataRequest.DisconnectIfExists();
      mMaster->mAudioWaitRequest.DisconnectIfExists();
      AudioQueue().Finish();
    }
    if (mMaster->HasVideo() && HasDecodedLastVideoFrame()) {
      SLOG("Mark video queue as finished");
      mMaster->mVideoDataRequest.DisconnectIfExists();
      mMaster->mVideoWaitRequest.DisconnectIfExists();
      VideoQueue().Finish();
    }

    // Clearing the waiting data should be done after marking the queues as
    // finished.
    mDataWaitingTimestampAdjustment = nullptr;

    mAudioDataRequest.DisconnectIfExists();
    mVideoDataRequest.DisconnectIfExists();
    mAudioSeekRequest.DisconnectIfExists();
    mVideoSeekRequest.DisconnectIfExists();
    DecodingState::Exit();
  }
  ~LoopingDecodingState() {
    MOZ_DIAGNOSTIC_ASSERT(!mAudioDataRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mVideoDataRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mAudioSeekRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists());
  }

  State GetState() const override { return DECODER_STATE_LOOPING_DECODING; }
  void HandleAudioDecoded(AudioData* aAudio) override {
    // TODO : check if we need to update mOriginalDecodedDuration

    // After pushing data to the queue, the timestamp might be adjusted.
    DecodingState::HandleAudioDecoded(aAudio);
    mMaster->mDecodedAudioEndTime =
        std::max(aAudio->GetEndTime(), mMaster->mDecodedAudioEndTime);
    SLOG("audio sample after time-adjustment [%" PRId64 ",%" PRId64 "]",
         aAudio->mTime.ToMicroseconds(), aAudio->GetEndTime().ToMicroseconds());
  }
  void HandleVideoDecoded(VideoData* aVideo) override {
    // TODO : check if we need to update mOriginalDecodedDuration

    // Here the sample still keeps its original timestamp.

    // This indicates there is a shorter audio track, and it's the first time
    // in the looping (audio ends but video is playing) so that we haven't been
    // able to determine the decoded duration. Therefore, we fill the gap
    // between the two tracks before video ends. Afterward, this adjustment
    // will be done in `HandleEndOfAudio()`.
    if (mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero() &&
        mMaster->mAudioTrackDecodedDuration &&
        aVideo->GetEndTime() > *mMaster->mAudioTrackDecodedDuration) {
      media::TimeUnit gap;
      // The first time, we fill the gap between the video frame and the last
      // audio.
      if (auto prevVideo = VideoQueue().PeekBack();
          prevVideo &&
          prevVideo->GetEndTime() < *mMaster->mAudioTrackDecodedDuration) {
        gap =
            aVideo->GetEndTime().ToBase(*mMaster->mAudioTrackDecodedDuration) -
            *mMaster->mAudioTrackDecodedDuration;
      }
      // Then fill the gap for all following videos.
      else {
        gap = aVideo->mDuration.ToBase(*mMaster->mAudioTrackDecodedDuration);
      }
      SLOG("Longer video %" PRId64 "%s (audio-duration=%" PRId64
           "%s), insert silence to fill the gap %" PRId64 "%s",
           aVideo->GetEndTime().ToMicroseconds(),
           aVideo->GetEndTime().ToString().get(),
           mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
           mMaster->mAudioTrackDecodedDuration->ToString().get(),
           gap.ToMicroseconds(), gap.ToString().get());
      PushFakeAudioDataIfNeeded(gap);
    }

    // After pushing data to the queue, the timestamp might be adjusted.
    DecodingState::HandleVideoDecoded(aVideo);
    mMaster->mDecodedVideoEndTime =
        std::max(aVideo->GetEndTime(), mMaster->mDecodedVideoEndTime);
    SLOG("video sample after time-adjustment [%" PRId64 ",%" PRId64 "]",
         aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds());
  }
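  // Illustrative numbers only: if the audio track ended at 9.9s and the next
  // decoded video frame ends at 10.0s while mOriginalDecodedDuration is still
  // unknown, the code above pushes ~0.1s of silent audio so both tracks reach
  // the same end time before the loop restarts.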
  void HandleEndOfAudio() override {
    mIsReachingAudioEOS = true;
    if (!mMaster->mAudioTrackDecodedDuration &&
        mMaster->HasLastDecodedData(MediaData::Type::AUDIO_DATA)) {
      mMaster->mAudioTrackDecodedDuration.emplace(
          mMaster->mDecodedAudioEndTime);
    }
    if (DetermineOriginalDecodedDurationIfNeeded()) {
      AudioQueue().SetOffset(AudioQueue().GetOffset() +
                             mMaster->mOriginalDecodedDuration);
    }

    // This indicates that the audio track is shorter than the video track, so
    // we need to add some silence to fill the gap.
    if (mMaster->mAudioTrackDecodedDuration &&
        mMaster->mOriginalDecodedDuration >
            *mMaster->mAudioTrackDecodedDuration) {
      MOZ_ASSERT(mMaster->HasVideo());
      MOZ_ASSERT(mMaster->mVideoTrackDecodedDuration);
      MOZ_ASSERT(mMaster->mOriginalDecodedDuration ==
                 *mMaster->mVideoTrackDecodedDuration);
      auto gap = mMaster->mOriginalDecodedDuration.ToBase(
                     *mMaster->mAudioTrackDecodedDuration) -
                 *mMaster->mAudioTrackDecodedDuration;
      SLOG(
          "Audio track is shorter than the original decoded duration "
          "(a=%" PRId64 "%s, t=%" PRId64
          "%s), insert silence to fill the gap %" PRId64 "%s",
          mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
          mMaster->mAudioTrackDecodedDuration->ToString().get(),
          mMaster->mOriginalDecodedDuration.ToMicroseconds(),
          mMaster->mOriginalDecodedDuration.ToString().get(),
          gap.ToMicroseconds(), gap.ToString().get());
      PushFakeAudioDataIfNeeded(gap);
    }

    SLOG(
        "received audio EOS when seamless looping, starts seeking, "
        "audioLoopingOffset=[%" PRId64 "], mAudioTrackDecodedDuration=[%" PRId64
        "]",
        AudioQueue().GetOffset().ToMicroseconds(),
        mMaster->mAudioTrackDecodedDuration->ToMicroseconds());
    if (!IsRequestingDataFromStartPosition(MediaData::Type::AUDIO_DATA)) {
      RequestDataFromStartPosition(TrackInfo::TrackType::kAudioTrack);
    }
    ProcessSamplesWaitingAdjustmentIfAny();
  }
  void HandleEndOfVideo() override {
    mIsReachingVideoEOS = true;
    if (!mMaster->mVideoTrackDecodedDuration &&
        mMaster->HasLastDecodedData(MediaData::Type::VIDEO_DATA)) {
      mMaster->mVideoTrackDecodedDuration.emplace(
          mMaster->mDecodedVideoEndTime);
    }
    if (DetermineOriginalDecodedDurationIfNeeded()) {
      VideoQueue().SetOffset(VideoQueue().GetOffset() +
                             mMaster->mOriginalDecodedDuration);
    }

    SLOG(
        "received video EOS when seamless looping, starts seeking, "
        "videoLoopingOffset=[%" PRId64 "], mVideoTrackDecodedDuration=[%" PRId64
        "]",
        VideoQueue().GetOffset().ToMicroseconds(),
        mMaster->mVideoTrackDecodedDuration->ToMicroseconds());
    if (!IsRequestingDataFromStartPosition(MediaData::Type::VIDEO_DATA)) {
      RequestDataFromStartPosition(TrackInfo::TrackType::kVideoTrack);
    }
    ProcessSamplesWaitingAdjustmentIfAny();
  }

 private:
  void RequestDataFromStartPosition(TrackInfo::TrackType aType) {
    MOZ_DIAGNOSTIC_ASSERT(aType == TrackInfo::TrackType::kAudioTrack ||
                          aType == TrackInfo::TrackType::kVideoTrack);

    const bool isAudio = aType == TrackInfo::TrackType::kAudioTrack;
    MOZ_ASSERT_IF(isAudio, mMaster->HasAudio());
    MOZ_ASSERT_IF(!isAudio, mMaster->HasVideo());

    if (IsReaderSeeking()) {
      MOZ_ASSERT(!mPendingSeekingType);
      mPendingSeekingType = Some(aType);
      SLOG("Delay %s seeking until the reader finishes current seeking",
           isAudio ? "audio" : "video");
      return;
    }

    auto& seekRequest = isAudio ? mAudioSeekRequest : mVideoSeekRequest;
    Reader()->ResetDecode(aType);
    Reader()
        ->Seek(SeekTarget(media::TimeUnit::Zero(), SeekTarget::Type::Accurate,
                          isAudio ? SeekTarget::Track::AudioOnly
                                  : SeekTarget::Track::VideoOnly))
        ->Then(
            OwnerThread(), __func__,
            [this, isAudio, master = RefPtr{mMaster}]() mutable -> void {
              AUTO_PROFILER_LABEL(
                  nsPrintfCString(
                      "LoopingDecodingState::RequestDataFromStartPosition(%s)::"
                      "SeekResolved",
                      isAudio ? "audio" : "video")
                      .get(),
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  !state ||
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
              }

              if (isAudio) {
                mAudioSeekRequest.Complete();
              } else {
                mVideoSeekRequest.Complete();
              }
              SLOG(
                  "seeking completed, start to request first %s sample "
                  "(queued=%zu, decoder-queued=%zu)",
                  isAudio ? "audio" : "video",
                  isAudio ? AudioQueue().GetSize() : VideoQueue().GetSize(),
                  isAudio ? Reader()->SizeOfAudioQueueInFrames()
                          : Reader()->SizeOfVideoQueueInFrames());
              if (isAudio) {
                RequestAudioDataFromReaderAfterEOS();
              } else {
                RequestVideoDataFromReaderAfterEOS();
              }
              if (mPendingSeekingType) {
                auto seekingType = *mPendingSeekingType;
                mPendingSeekingType.reset();
                SLOG("Perform pending %s seeking", TrackTypeToStr(seekingType));
                RequestDataFromStartPosition(seekingType);
              }
            },
            [this, isAudio, master = RefPtr{mMaster}](
                const SeekRejectValue& aReject) mutable -> void {
              AUTO_PROFILER_LABEL(
                  nsPrintfCString("LoopingDecodingState::"
                                  "RequestDataFromStartPosition(%s)::"
                                  "SeekRejected",
                                  isAudio ? "audio" : "video")
                      .get(),
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  !state ||
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
              }

              if (isAudio) {
                mAudioSeekRequest.Complete();
              } else {
                mVideoSeekRequest.Complete();
              }
              HandleError(aReject.mError, isAudio);
            })
        ->Track(seekRequest);
  }
  void RequestAudioDataFromReaderAfterEOS() {
    MOZ_ASSERT(mMaster->HasAudio());
    Reader()
        ->RequestAudioData()
        ->Then(
            OwnerThread(), __func__,
            [this, master = RefPtr{mMaster}](const RefPtr<AudioData>& aAudio) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestAudioDataFromReader::"
                  "RequestDataResolved",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  !state ||
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
              }

              mIsReachingAudioEOS = false;
              mAudioDataRequest.Complete();
              SLOG(
                  "got audio decoded sample "
                  "[%" PRId64 ",%" PRId64 "]",
                  aAudio->mTime.ToMicroseconds(),
                  aAudio->GetEndTime().ToMicroseconds());
              if (ShouldPutDataOnWaiting(MediaData::Type::AUDIO_DATA)) {
                SLOG(
                    "decoded audio sample needs to wait for timestamp "
                    "adjustment after EOS");
                PutDataOnWaiting(aAudio);
                return;
              }
              HandleAudioDecoded(aAudio);
              ProcessSamplesWaitingAdjustmentIfAny();
            },
            [this, master = RefPtr{mMaster}](const MediaResult& aError) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestAudioDataFromReader::"
                  "RequestDataRejected",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  !state ||
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
              }

              mAudioDataRequest.Complete();
              HandleError(aError, true /* isAudio */);
            })
        ->Track(mAudioDataRequest);
  }
  void RequestVideoDataFromReaderAfterEOS() {
    MOZ_ASSERT(mMaster->HasVideo());
    Reader()
        ->RequestVideoData(media::TimeUnit(),
                           false /* aRequestNextVideoKeyFrame */)
        ->Then(
            OwnerThread(), __func__,
            [this, master = RefPtr{mMaster}](const RefPtr<VideoData>& aVideo) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestVideoDataFromReaderAfterEOS()::"
                  "RequestDataResolved",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  !state ||
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
              }

              mIsReachingVideoEOS = false;
              mVideoDataRequest.Complete();
              SLOG(
                  "got video decoded sample "
                  "[%" PRId64 ",%" PRId64 "]",
                  aVideo->mTime.ToMicroseconds(),
                  aVideo->GetEndTime().ToMicroseconds());
              if (ShouldPutDataOnWaiting(MediaData::Type::VIDEO_DATA)) {
                SLOG(
                    "decoded video sample needs to wait for timestamp "
                    "adjustment after EOS");
                PutDataOnWaiting(aVideo);
                return;
              }
              mMaster->mBypassingSkipToNextKeyFrameCheck = true;
              HandleVideoDecoded(aVideo);
              ProcessSamplesWaitingAdjustmentIfAny();
            },
            [this, master = RefPtr{mMaster}](const MediaResult& aError) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestVideoDataFromReaderAfterEOS()::"
                  "RequestDataRejected",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  !state ||
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false, "This shouldn't happen!");
              }

              mVideoDataRequest.Complete();
              HandleError(aError, false /* isAudio */);
            })
        ->Track(mVideoDataRequest);
  }
  void HandleError(const MediaResult& aError, bool aIsAudio);

  bool ShouldRequestData(MediaData::Type aType) const {
    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                          aType == MediaData::Type::VIDEO_DATA);

    if (aType == MediaData::Type::AUDIO_DATA &&
        (mAudioSeekRequest.Exists() || mAudioDataRequest.Exists() ||
         IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA) ||
         mMaster->IsWaitingAudioData())) {
      return false;
    }
    if (aType == MediaData::Type::VIDEO_DATA &&
        (mVideoSeekRequest.Exists() || mVideoDataRequest.Exists() ||
         IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA) ||
         mMaster->IsWaitingVideoData())) {
      return false;
    }
    return true;
  }
  void HandleAudioCanceled() override {
    if (ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
      mMaster->RequestAudioData();
    }
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    if (ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
      mMaster->RequestAudioData();
    }
  }

  void HandleVideoCanceled() override {
    if (ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
      mMaster->RequestVideoData(mMaster->GetMediaTime(),
                                ShouldRequestNextKeyFrame());
    }
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    if (ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
      mMaster->RequestVideoData(mMaster->GetMediaTime(),
                                ShouldRequestNextKeyFrame());
    }
  }

  void EnsureAudioDecodeTaskQueued() override {
    if (!ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
      return;
    }
    DecodingState::EnsureAudioDecodeTaskQueued();
  }

  void EnsureVideoDecodeTaskQueued() override {
    if (!ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
      return;
    }
    DecodingState::EnsureVideoDecodeTaskQueued();
  }
  bool DetermineOriginalDecodedDurationIfNeeded() {
    // The duration only needs to be set once, unless we get more data which is
    // longer than the current duration. That can happen on MSE (reopen
    // stream).
    if (mMaster->mOriginalDecodedDuration != media::TimeUnit::Zero()) {
      return true;
    }

    // Single-track situations.
    if (mMaster->HasAudio() && !mMaster->HasVideo() &&
        mMaster->mAudioTrackDecodedDuration) {
      mMaster->mOriginalDecodedDuration = *mMaster->mAudioTrackDecodedDuration;
      SLOG("audio only, duration=%" PRId64,
           mMaster->mOriginalDecodedDuration.ToMicroseconds());
      return true;
    }
    if (mMaster->HasVideo() && !mMaster->HasAudio() &&
        mMaster->mVideoTrackDecodedDuration) {
      mMaster->mOriginalDecodedDuration = *mMaster->mVideoTrackDecodedDuration;
      SLOG("video only, duration=%" PRId64,
           mMaster->mOriginalDecodedDuration.ToMicroseconds());
      return true;
    }

    // Two-track situation.
    if (mMaster->HasAudio() && mMaster->HasVideo()) {
      // Both tracks have ended so that we can check which track is longer.
      if (mMaster->mAudioTrackDecodedDuration &&
          mMaster->mVideoTrackDecodedDuration) {
        mMaster->mOriginalDecodedDuration =
            std::max(*mMaster->mVideoTrackDecodedDuration,
                     *mMaster->mAudioTrackDecodedDuration);
        SLOG("Both tracks ended, original duration=%" PRId64 " (a=%" PRId64
             ", v=%" PRId64 ")",
             mMaster->mOriginalDecodedDuration.ToMicroseconds(),
             mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
             mMaster->mVideoTrackDecodedDuration->ToMicroseconds());
        return true;
      }
      // When entering the state, video has ended but audio hasn't, which means
      // audio is longer.
      if (mMaster->mAudioTrackDecodedDuration &&
          mVideoEndedBeforeEnteringStateWithoutDuration) {
        mMaster->mOriginalDecodedDuration =
            *mMaster->mAudioTrackDecodedDuration;
        mVideoEndedBeforeEnteringStateWithoutDuration = false;
        SLOG("audio is longer, duration=%" PRId64,
             mMaster->mOriginalDecodedDuration.ToMicroseconds());
        return true;
      }
      // When entering the state, audio has ended but video hasn't, which means
      // video is longer.
      if (mMaster->mVideoTrackDecodedDuration &&
          mAudioEndedBeforeEnteringStateWithoutDuration) {
        mMaster->mOriginalDecodedDuration =
            *mMaster->mVideoTrackDecodedDuration;
        mAudioEndedBeforeEnteringStateWithoutDuration = false;
        SLOG("video is longer, duration=%" PRId64,
             mMaster->mOriginalDecodedDuration.ToMicroseconds());
        return true;
      }
      SLOG("Still waiting for the other track to end...");
      MOZ_ASSERT(!mMaster->mAudioTrackDecodedDuration ||
                 !mMaster->mVideoTrackDecodedDuration);
    }
    SLOG("can't determine the original decoded duration yet");
    MOZ_ASSERT(mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero());
    return false;
  }
  void ProcessSamplesWaitingAdjustmentIfAny() {
    if (!mDataWaitingTimestampAdjustment) {
      return;
    }

    RefPtr<MediaData> data = mDataWaitingTimestampAdjustment;
    mDataWaitingTimestampAdjustment = nullptr;
    const bool isAudio = data->mType == MediaData::Type::AUDIO_DATA;
    SLOG("process %s sample waiting for timestamp adjustment",
         isAudio ? "audio" : "video");

    // The waiting sample is for the next round of looping, so the queue offset
    // shouldn't be zero. This happens when the track has reached EOS before
    // entering the state (and looping never happened before). The same applies
    // to the video case below.
    if (isAudio) {
      if (AudioQueue().GetOffset() == media::TimeUnit::Zero()) {
        AudioQueue().SetOffset(mMaster->mOriginalDecodedDuration);
      }
      HandleAudioDecoded(data->As<AudioData>());
    } else {
      MOZ_DIAGNOSTIC_ASSERT(data->mType == MediaData::Type::VIDEO_DATA);
      if (VideoQueue().GetOffset() == media::TimeUnit::Zero()) {
        VideoQueue().SetOffset(mMaster->mOriginalDecodedDuration);
      }
      HandleVideoDecoded(data->As<VideoData>());
    }
  }
  bool IsDataWaitingForTimestampAdjustment(MediaData::Type aType) const {
    return mDataWaitingTimestampAdjustment &&
           mDataWaitingTimestampAdjustment->mType == aType;
  }

  bool ShouldPutDataOnWaiting(MediaData::Type aType) const {
    // If another track is already waiting, this track shouldn't be waiting.
    // This case only happens when both tracks reached EOS before entering the
    // looping decoding state, so we don't know the decoded duration yet (used
    // to adjust the timestamp). But this is fine, because both tracks will
    // start from 0 so we don't need to adjust them now.
    if (mDataWaitingTimestampAdjustment &&
        !IsDataWaitingForTimestampAdjustment(aType)) {
      return false;
    }

    // Only have one track, no need to wait.
    if ((aType == MediaData::Type::AUDIO_DATA && !mMaster->HasVideo()) ||
        (aType == MediaData::Type::VIDEO_DATA && !mMaster->HasAudio())) {
      return false;
    }

    // We don't know the duration yet, so we can't calculate the looping
    // offset.
    return mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero();
  }
  void PutDataOnWaiting(MediaData* aData) {
    MOZ_ASSERT(!mDataWaitingTimestampAdjustment);
    mDataWaitingTimestampAdjustment = aData;
    SLOG("put %s [%" PRId64 ",%" PRId64 "] on waiting",
         MediaData::EnumValueToString(aData->mType),
         aData->mTime.ToMicroseconds(), aData->GetEndTime().ToMicroseconds());
    MaybeStopPrerolling();
  }
  bool ShouldDiscardLoopedData(MediaData::Type aType) const {
    if (!mMaster->mMediaSink->IsStarted()) {
      return false;
    }

    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                          aType == MediaData::Type::VIDEO_DATA);
    const bool isAudio = aType == MediaData::Type::AUDIO_DATA;
    if (isAudio && !mMaster->HasAudio()) {
      return false;
    }
    if (!isAudio && !mMaster->HasVideo()) {
      return false;
    }

    /*
     * If media cancels looping, we should check whether there is media data
     * whose time is later than EOS. If so, we should discard them because we
     * won't have a chance to play them.
     *
     *    playback                      last decoded
     *    position          EOS         data time
     * ----|----------------|-----------|---------> (Increasing timeline)
     *    mCurrent         looping      mMaster's
     *    ClockTime        offset       mDecodedXXXEndTime
     */
    const auto offset =
        isAudio ? AudioQueue().GetOffset() : VideoQueue().GetOffset();
    const auto endTime =
        isAudio ? mMaster->mDecodedAudioEndTime : mMaster->mDecodedVideoEndTime;
    const auto clockTime = mMaster->GetClock();
    return (offset != media::TimeUnit::Zero() && clockTime < offset &&
            endTime > offset);
  }
  void DiscardLoopedData(MediaData::Type aType) {
    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                          aType == MediaData::Type::VIDEO_DATA);
    const bool isAudio = aType == MediaData::Type::AUDIO_DATA;
    const auto offset =
        isAudio ? AudioQueue().GetOffset() : VideoQueue().GetOffset();
    if (offset == media::TimeUnit::Zero()) {
      return;
    }

    SLOG("Discard %s frames after the time=%" PRId64,
         isAudio ? "audio" : "video", offset.ToMicroseconds());
    if (isAudio) {
      DiscardFramesFromTail(AudioQueue(), [&](int64_t aSampleTime) {
        return aSampleTime > offset.ToMicroseconds();
      });
    } else {
      DiscardFramesFromTail(VideoQueue(), [&](int64_t aSampleTime) {
        return aSampleTime > offset.ToMicroseconds();
      });
    }
  }
  void PushFakeAudioDataIfNeeded(const media::TimeUnit& aDuration) {
    MOZ_ASSERT(Info().HasAudio());

    const auto& audioInfo = Info().mAudio;
    CheckedInt64 frames = aDuration.ToTicksAtRate(audioInfo.mRate);
    if (!frames.isValid() || !audioInfo.mChannels || !audioInfo.mRate) {
      NS_WARNING("Can't create fake audio, invalid frames/channel/rate?");
      return;
    }

    if (!frames.value()) {
      NS_WARNING(nsPrintfCString("Duration (%s) too short, no frame needed",
                                 aDuration.ToString().get())
                     .get());
      return;
    }

    // If we can get the last sample, use its frame count. Otherwise, use the
    // common 1024.
    int64_t typicalPacketFrameCount = 1024;
    if (RefPtr<AudioData> audio = AudioQueue().PeekBack()) {
      typicalPacketFrameCount = audio->Frames();
    }

    media::TimeUnit totalDuration = TimeUnit::Zero(audioInfo.mRate);
    // Generate the fake audio in smaller audio chunks.
    while (frames.value()) {
      int64_t packetFrameCount =
          std::min(frames.value(), typicalPacketFrameCount);
      frames -= packetFrameCount;
      AlignedAudioBuffer samples(packetFrameCount * audioInfo.mChannels);
      if (!samples) {
        NS_WARNING("Can't create audio buffer, OOM?");
        return;
      }
      // `mDecodedAudioEndTime` is the adjusted time, and we want the
      // unadjusted time; otherwise the time would be adjusted twice when
      // pushing the sample into the media queue.
      media::TimeUnit startTime = mMaster->mDecodedAudioEndTime;
      if (AudioQueue().GetOffset() != media::TimeUnit::Zero()) {
        startTime -= AudioQueue().GetOffset();
      }
      RefPtr<AudioData> data(new AudioData(0, startTime, std::move(samples),
                                           audioInfo.mChannels,
                                           audioInfo.mRate));
      SLOG("Created fake audio data (duration=%s, frame-left=%" PRId64 ")",
           data->mDuration.ToString().get(), frames.value());
      totalDuration += data->mDuration;
      HandleAudioDecoded(data);
    }
    SLOG("Pushed fake silence audio data in total duration=%" PRId64 "%s",
         totalDuration.ToMicroseconds(), totalDuration.ToString().get());
  }
  bool HasDecodedLastAudioFrame() const {
    // When we're going to leave the looping state and have got EOS before, we
    // should mark the audio queue as ended because we have got all the data
    // we need.
    return mAudioDataRequest.Exists() || mAudioSeekRequest.Exists() ||
           ShouldDiscardLoopedData(MediaData::Type::AUDIO_DATA) ||
           IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA) ||
           mIsReachingAudioEOS;
  }

  bool HasDecodedLastVideoFrame() const {
    // When we're going to leave the looping state and have got EOS before, we
    // should mark the video queue as ended because we have got all the data
    // we need.
    return mVideoDataRequest.Exists() || mVideoSeekRequest.Exists() ||
           ShouldDiscardLoopedData(MediaData::Type::VIDEO_DATA) ||
           IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA) ||
           mIsReachingVideoEOS;
  }
  bool ShouldStopPrerolling() const override {
    // These checks handle the case where the media queues weren't reopened
    // correctly because they had been closed before entering the looping
    // state. Therefore, we need to preroll data in order to let new data
    // reopen the queues automatically. Otherwise, playback can't start
    // successfully.
    bool isWaitingForNewData = false;
    if (mMaster->HasAudio()) {
      isWaitingForNewData |= (mIsReachingAudioEOS && AudioQueue().IsFinished());
    }
    if (mMaster->HasVideo()) {
      isWaitingForNewData |= (mIsReachingVideoEOS && VideoQueue().IsFinished());
    }
    return !isWaitingForNewData && DecodingState::ShouldStopPrerolling();
  }
  bool IsReaderSeeking() const {
    return mAudioSeekRequest.Exists() || mVideoSeekRequest.Exists();
  }

  bool IsWaitingData(MediaData::Type aType) const override {
    if (aType == MediaData::Type::AUDIO_DATA) {
      return mMaster->IsWaitingAudioData() ||
             IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA);
    }
    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::VIDEO_DATA);
    return mMaster->IsWaitingVideoData() ||
           IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA);
  }
  bool IsRequestingDataFromStartPosition(MediaData::Type aType) const {
    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                          aType == MediaData::Type::VIDEO_DATA);
    if (aType == MediaData::Type::AUDIO_DATA) {
      return mAudioSeekRequest.Exists() || mAudioDataRequest.Exists();
    }
    return mVideoSeekRequest.Exists() || mVideoDataRequest.Exists();
  }

  bool IsBufferingAllowed() const override {
    return !mIsReachingAudioEOS && !mIsReachingVideoEOS;
  }

  bool mIsReachingAudioEOS;
  bool mIsReachingVideoEOS;
  /**
   * If we have two tracks with different lengths, then when one track ends
   * first, we can't adjust new data from that track while the other, longer
   * track hasn't ended yet. The adjusted timestamp needs to be based off the
   * longer track's last data's timestamp, because otherwise it would cause a
   * deviation and eventually a/v desync. Those samples need to be stored and
   * we will adjust their timestamps later.
   *
   * The following graph explains the situation in detail.
   *   o : decoded data with timestamp adjusted or no adjustment (not looping
   *       yet)
   *   x : decoded data without timestamp adjustment.
   *   - : stop decoding and nothing happens
   *   EOS : the track reaches the end. We now know the offset of the track.
   *
   * Timeline ----------------------------------->
   *   Track1 :   o   EOS   x   -   -   o
   *   Track2 :   o    o    o  EOS  o   o
   *
   * Before reaching track2's EOS, we can't adjust samples from track1 because
   * track2 might have a longer duration than track1. The sample x would be
   * stored in `mDataWaitingTimestampAdjustment` and we would also stop
   * decoding for track1.
   *
   * After reaching track2's EOS, we now know the other track's offset, and
   * the larger one is used for `mOriginalDecodedDuration`. Once that duration
   * has been determined, we no longer need to put samples on waiting because
   * we already know how to adjust timestamps.
   */
1697 RefPtr
<MediaData
> mDataWaitingTimestampAdjustment
;
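
  // Illustrative example (numbers are hypothetical, not from the code): if
  // audio reaches EOS at 10s while video keeps decoding until 12s, looped
  // audio samples are held here until video also hits EOS. At that point the
  // larger offset (12s) becomes `mOriginalDecodedDuration`, and a held sample
  // with raw time t can be presented at roughly (12s + t) on the next loop.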
  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mAudioSeekRequest;
  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mVideoSeekRequest;
  MozPromiseRequestHolder<AudioDataPromise> mAudioDataRequest;
  MozPromiseRequestHolder<VideoDataPromise> mVideoDataRequest;

  // The media format reader only allows seeking one track at a time; if we're
  // already seeking, delay the new seek until the current one finishes.
  Maybe<TrackInfo::TrackType> mPendingSeekingType;

  // These track a special case where playback starts from the EOS position
  // via seeking, so even though EOS has been reached, no data has been
  // decoded yet. They are reset once `mOriginalDecodedDuration` is
  // determined.
  bool mAudioEndedBeforeEnteringStateWithoutDuration;
  bool mVideoEndedBeforeEnteringStateWithoutDuration;
};
/**
 * Purpose: seek to a particular new playback position.
 *
 * Transition to:
 *   SEEKING if any new seek request.
 *   SHUTDOWN if seek failed.
 *   COMPLETED if the new playback position is the end of the media resource.
 *   NextFrameSeekingState if completing a NextFrameSeekingFromDormantState.
 *   DECODING/LOOPING_DECODING otherwise.
 */
class MediaDecoderStateMachine::SeekingState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit SeekingState(Master* aPtr)
      : StateObject(aPtr), mVisibility(static_cast<EventVisibility>(0)) {}

  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                          EventVisibility aVisibility) {
    mSeekJob = std::move(aSeekJob);
    mVisibility = aVisibility;

    // Suppressed visibility comes from two cases: (1) leaving dormant state,
    // and (2) resuming a suspended video decoder. We want both cases to be
    // transparent to the user, so we only notify the change when the seek
    // request comes from the user.
    if (mVisibility == EventVisibility::Observable) {
      // Don't stop playback for a video-only seek since we want to keep
      // playing audio, and we don't need to stop playback while leaving
      // dormant because playback has already been stopped there.
      mMaster->StopPlayback();
      mMaster->UpdatePlaybackPositionInternal(mSeekJob.mTarget->GetTime());
      mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::SeekStarted);
      mMaster->mOnNextFrameStatus.Notify(
          MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);
    }

    RefPtr<MediaDecoder::SeekPromise> p = mSeekJob.mPromise.Ensure(__func__);

    DoSeek();

    return p;
  }

  virtual void Exit() override = 0;

  State GetState() const override = 0;

  void HandleAudioDecoded(AudioData* aAudio) override = 0;
  void HandleVideoDecoded(VideoData* aVideo) override = 0;
  void HandleAudioWaited(MediaData::Type aType) override = 0;
  void HandleVideoWaited(MediaData::Type aType) override = 0;

  void HandleVideoSuspendTimeout() override {
    // Do nothing since we want a valid video frame to show when seek is done.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // Do nothing. We will resume video decoding in the decoding state.
  }

  // We specially handle next frame seeks by ignoring them if we're already
  // seeking.
  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override {
    if (aTarget.IsNextFrame()) {
      // We ignore next frame seeks if we already have a seek pending.
      SLOG("Already SEEKING, ignoring seekToNextFrame");
      MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
      return MediaDecoder::SeekPromise::CreateAndReject(
          /* aRejectValue = */ true, __func__);
    }

    return StateObject::HandleSeek(aTarget);
  }

 protected:
  SeekJob mSeekJob;
  EventVisibility mVisibility;

  virtual void DoSeek() = 0;
  // Transition to the next state (defined by the subclass) when the seek is
  // completed.
  virtual void GoToNextState() { SetDecodingState(); }
  void SeekCompleted();
  virtual TimeUnit CalculateNewCurrentTime() const = 0;
};
class MediaDecoderStateMachine::AccurateSeekingState
    : public MediaDecoderStateMachine::SeekingState {
 public:
  explicit AccurateSeekingState(Master* aPtr) : SeekingState(aPtr) {}

  State GetState() const override { return DECODER_STATE_SEEKING_ACCURATE; }

  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                          EventVisibility aVisibility) {
    MOZ_ASSERT(aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast());
    mCurrentTimeBeforeSeek = mMaster->GetMediaTime();
    return SeekingState::Enter(std::move(aSeekJob), aVisibility);
  }

  void Exit() override {
    // Disconnect MediaDecoder.
    mSeekJob.RejectIfExists(__func__);

    // Disconnect ReaderProxy.
    mSeekRequest.DisconnectIfExists();

    mWaitRequest.DisconnectIfExists();
  }
  void HandleAudioDecoded(AudioData* aAudio) override {
    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
               "Seek shouldn't be finished");

    AdjustFastSeekIfNeeded(aAudio);

    if (mSeekJob.mTarget->IsFast()) {
      // Non-precise seek; we can stop the seek at the first sample.
      mMaster->PushAudio(aAudio);
      mDoneAudioSeeking = true;
    } else {
      nsresult rv = DropAudioUpToSeekTarget(aAudio);
      if (NS_FAILED(rv)) {
        mMaster->DecodeError(rv);
        return;
      }
    }

    if (!mDoneAudioSeeking) {
      RequestAudioData();
      return;
    }
    MaybeFinishSeek();
  }
  void HandleVideoDecoded(VideoData* aVideo) override {
    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
               "Seek shouldn't be finished");

    AdjustFastSeekIfNeeded(aVideo);

    if (mSeekJob.mTarget->IsFast()) {
      // Non-precise seek; we can stop the seek at the first sample.
      mMaster->PushVideo(aVideo);
      mDoneVideoSeeking = true;
    } else {
      nsresult rv = DropVideoUpToSeekTarget(aVideo);
      if (NS_FAILED(rv)) {
        mMaster->DecodeError(rv);
        return;
      }
    }

    if (!mDoneVideoSeeking) {
      RequestVideoData();
      return;
    }
    MaybeFinishSeek();
  }
  void HandleWaitingForAudio() override {
    MOZ_ASSERT(!mDoneAudioSeeking);
    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
  }

  void HandleAudioCanceled() override {
    MOZ_ASSERT(!mDoneAudioSeeking);
    RequestAudioData();
  }

  void HandleEndOfAudio() override {
    HandleEndOfAudioInternal();
    MaybeFinishSeek();
  }

  void HandleWaitingForVideo() override {
    MOZ_ASSERT(!mDoneVideoSeeking);
    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
  }

  void HandleVideoCanceled() override {
    MOZ_ASSERT(!mDoneVideoSeeking);
    RequestVideoData();
  }

  void HandleEndOfVideo() override {
    HandleEndOfVideoInternal();
    MaybeFinishSeek();
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
               "Seek shouldn't be finished");
    RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
               "Seek shouldn't be finished");
    RequestVideoData();
  }
  void DoSeek() override {
    mDoneAudioSeeking = !Info().HasAudio();
    mDoneVideoSeeking = !Info().HasVideo();

    // Resetting decode should be called after stopping the media sink, which
    // ensures that we have an empty media queue before seeking the demuxer.
    mMaster->StopMediaSink();
    mMaster->ResetDecode();

    DemuxerSeek();
  }

  TimeUnit CalculateNewCurrentTime() const override {
    const auto seekTime = mSeekJob.mTarget->GetTime();

    // For an accurate seek, we always set newCurrentTime = seekTime so that
    // the updated HTMLMediaElement.currentTime is always the seek target; we
    // rely on the MediaSink to handle the gap between newCurrentTime and the
    // real decoded samples' start time.
    if (mSeekJob.mTarget->IsAccurate()) {
      return seekTime;
    }

    // For a fast seek, we update newCurrentTime with the decoded audio and
    // video samples, setting it to the one closest to the seekTime.
    if (mSeekJob.mTarget->IsFast()) {
      RefPtr<AudioData> audio = AudioQueue().PeekFront();
      RefPtr<VideoData> video = VideoQueue().PeekFront();

      // A situation in which both audio and video approach the end.
      if (!audio && !video) {
        return seekTime;
      }

      const int64_t audioStart =
          audio ? audio->mTime.ToMicroseconds() : INT64_MAX;
      const int64_t videoStart =
          video ? video->mTime.ToMicroseconds() : INT64_MAX;
      const int64_t audioGap = std::abs(audioStart - seekTime.ToMicroseconds());
      const int64_t videoGap = std::abs(videoStart - seekTime.ToMicroseconds());
      return TimeUnit::FromMicroseconds(audioGap <= videoGap ? audioStart
                                                             : videoStart);
    }

    MOZ_ASSERT(false, "AccurateSeekTask doesn't handle other seek types.");
    return TimeUnit::Zero();
  }
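
  // For example, with a fast seek to 5.000s where the first decoded audio
  // sample starts at 4.980s and the first video sample at 5.100s, the gaps
  // are 20ms and 100ms respectively, so the new current time becomes 4.980s.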
  void DemuxerSeek() {
    // Request the demuxer to perform the seek.
    Reader()
        ->Seek(mSeekJob.mTarget.ref())
        ->Then(
            OwnerThread(), __func__,
            [this](const media::TimeUnit& aUnit) { OnSeekResolved(aUnit); },
            [this](const SeekRejectValue& aReject) { OnSeekRejected(aReject); })
        ->Track(mSeekRequest);
  }

  void OnSeekResolved(media::TimeUnit) {
    AUTO_PROFILER_LABEL("AccurateSeekingState::OnSeekResolved", MEDIA_PLAYBACK);
    mSeekRequest.Complete();

    // We must decode the first samples of active streams, so we can determine
    // the new stream time. So dispatch tasks to do that.
    if (!mDoneVideoSeeking) {
      RequestVideoData();
    }
    if (!mDoneAudioSeeking) {
      RequestAudioData();
    }
  }
  void OnSeekRejected(const SeekRejectValue& aReject) {
    AUTO_PROFILER_LABEL("AccurateSeekingState::OnSeekRejected", MEDIA_PLAYBACK);
    mSeekRequest.Complete();

    if (aReject.mError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) {
      SLOG("OnSeekRejected reason=WAITING_FOR_DATA type=%s",
           MediaData::EnumValueToString(aReject.mType));
      MOZ_ASSERT_IF(aReject.mType == MediaData::Type::AUDIO_DATA,
                    !mMaster->IsRequestingAudioData());
      MOZ_ASSERT_IF(aReject.mType == MediaData::Type::VIDEO_DATA,
                    !mMaster->IsRequestingVideoData());
      MOZ_ASSERT_IF(aReject.mType == MediaData::Type::AUDIO_DATA,
                    !mMaster->IsWaitingAudioData());
      MOZ_ASSERT_IF(aReject.mType == MediaData::Type::VIDEO_DATA,
                    !mMaster->IsWaitingVideoData());

      // Fire 'waiting' to notify the player that we are waiting for data.
      mMaster->mOnNextFrameStatus.Notify(
          MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING);

      Reader()
          ->WaitForData(aReject.mType)
          ->Then(
              OwnerThread(), __func__,
              [this](MediaData::Type aType) {
                AUTO_PROFILER_LABEL(
                    "AccurateSeekingState::OnSeekRejected:WaitDataResolved",
                    MEDIA_PLAYBACK);
                SLOG("OnSeekRejected wait promise resolved");
                mWaitRequest.Complete();
                DemuxerSeek();
              },
              [this](const WaitForDataRejectValue& aRejection) {
                AUTO_PROFILER_LABEL(
                    "AccurateSeekingState::OnSeekRejected:WaitDataRejected",
                    MEDIA_PLAYBACK);
                SLOG("OnSeekRejected wait promise rejected");
                mWaitRequest.Complete();
                mMaster->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
              })
          ->Track(mWaitRequest);
      return;
    }

    if (aReject.mError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
      if (!mDoneAudioSeeking) {
        HandleEndOfAudioInternal();
      }
      if (!mDoneVideoSeeking) {
        HandleEndOfVideoInternal();
      }
      MaybeFinishSeek();
      return;
    }

    MOZ_ASSERT(NS_FAILED(aReject.mError),
               "Cancels should also disconnect mSeekRequest");
    mMaster->DecodeError(aReject.mError);
  }
  void RequestAudioData() {
    MOZ_ASSERT(!mDoneAudioSeeking);
    mMaster->RequestAudioData();
  }

  virtual void RequestVideoData() {
    MOZ_ASSERT(!mDoneVideoSeeking);
    mMaster->RequestVideoData(media::TimeUnit());
  }
  void AdjustFastSeekIfNeeded(MediaData* aSample) {
    if (mSeekJob.mTarget->IsFast() &&
        mSeekJob.mTarget->GetTime() > mCurrentTimeBeforeSeek &&
        aSample->mTime < mCurrentTimeBeforeSeek) {
      // We are doing a fastSeek, but we ended up *before* the previous
      // playback position. This is surprising UX, so switch to an accurate
      // seek and decode to the seek target. This is not conformant to the
      // spec; fastSeek should always be fast, but until we get the time to
      // change all Readers to seek to the keyframe after the currentTime
      // in this case, we'll just decode forward. Bug 1026330.
      mSeekJob.mTarget->SetType(SeekTarget::Accurate);
    }
  }
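
  // For example, if playback is at 10s and a fastSeek targets 12s but the
  // nearest keyframe decodes at 8s, the target type is switched to Accurate
  // so we decode forward to 12s instead of jumping back to 8s.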
  nsresult DropAudioUpToSeekTarget(AudioData* aAudio) {
    MOZ_ASSERT(aAudio && mSeekJob.mTarget->IsAccurate());

    if (mSeekJob.mTarget->GetTime() >= aAudio->GetEndTime()) {
      // Our seek target lies after the frames in this AudioData. Don't
      // push it onto the audio queue, and keep decoding forwards.
      return NS_OK;
    }

    if (aAudio->mTime > mSeekJob.mTarget->GetTime()) {
      // The seek target doesn't lie in the audio block just after the last
      // audio frames we've seen which were before the seek target. This
      // could have been the first audio data we've seen after seek, i.e. the
      // seek terminated after the seek target in the audio stream. Just
      // abort the audio decode-to-target; the state machine will play
      // silence to cover the gap. Typically this happens in poorly muxed
      // files.
      SLOGW("Audio not synced after seek, maybe a poorly muxed file?");
      mMaster->PushAudio(aAudio);
      mDoneAudioSeeking = true;
      return NS_OK;
    }

    bool ok = aAudio->SetTrimWindow(
        {mSeekJob.mTarget->GetTime().ToBase(aAudio->mTime),
         aAudio->GetEndTime()});
    if (!ok) {
      return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR;
    }

    MOZ_ASSERT(AudioQueue().GetSize() == 0,
               "Should be the 1st sample after seeking");
    mMaster->PushAudio(aAudio);
    mDoneAudioSeeking = true;

    return NS_OK;
  }
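
  // For example, an accurate seek to 5.000s that receives an audio block
  // covering [4.950s, 5.050s] keeps the block but trims it to
  // [5.000s, 5.050s] via SetTrimWindow(), so audio starts exactly at the
  // seek target.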
  nsresult DropVideoUpToSeekTarget(VideoData* aVideo) {
    SLOG("DropVideoUpToSeekTarget() frame [%" PRId64 ", %" PRId64 "]",
         aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds());
    const auto target = GetSeekTarget();

    // If the frame end time is less than the seek target, we won't want
    // to display this frame after the seek, so discard it.
    if (target >= aVideo->GetEndTime()) {
      SLOG("DropVideoUpToSeekTarget() pop video frame [%" PRId64 ", %" PRId64
           "] target=%" PRId64,
           aVideo->mTime.ToMicroseconds(),
           aVideo->GetEndTime().ToMicroseconds(), target.ToMicroseconds());
      PROFILER_MARKER_UNTYPED("MDSM::DropVideoUpToSeekTarget", MEDIA_PLAYBACK);
      mFirstVideoFrameAfterSeek = aVideo;
    } else {
      if (target >= aVideo->mTime && aVideo->GetEndTime() >= target) {
        // The seek target lies inside this frame's time slice. Adjust the
        // frame's start time to match the seek target.
        aVideo->UpdateTimestamp(target);
      }
      mFirstVideoFrameAfterSeek = nullptr;

      SLOG("DropVideoUpToSeekTarget() found video frame [%" PRId64 ", %" PRId64
           "] containing target=%" PRId64,
           aVideo->mTime.ToMicroseconds(),
           aVideo->GetEndTime().ToMicroseconds(), target.ToMicroseconds());

      MOZ_ASSERT(VideoQueue().GetSize() == 0,
                 "Should be the 1st sample after seeking");
      mMaster->PushVideo(aVideo);
      mDoneVideoSeeking = true;
    }

    return NS_OK;
  }
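
  // For example, with a seek target of 5.00s, a frame spanning [4.90s, 4.95s]
  // is dropped (kept only as mFirstVideoFrameAfterSeek), while a frame
  // spanning [4.97s, 5.03s] contains the target and has its start time
  // adjusted to 5.00s before being pushed onto the queue.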
  void HandleEndOfAudioInternal() {
    MOZ_ASSERT(!mDoneAudioSeeking);
    AudioQueue().Finish();
    mDoneAudioSeeking = true;
  }

  void HandleEndOfVideoInternal() {
    MOZ_ASSERT(!mDoneVideoSeeking);
    if (mFirstVideoFrameAfterSeek) {
      // Hit the end of stream. Move mFirstVideoFrameAfterSeek into
      // mSeekedVideoData so we have something to display after seeking.
      mMaster->PushVideo(mFirstVideoFrameAfterSeek);
    }
    VideoQueue().Finish();
    mDoneVideoSeeking = true;
  }

  void MaybeFinishSeek() {
    if (mDoneAudioSeeking && mDoneVideoSeeking) {
      SeekCompleted();
    }
  }
  /*
   * Track the current seek promise made by the reader.
   */
  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mSeekRequest;

  media::TimeUnit mCurrentTimeBeforeSeek;
  bool mDoneAudioSeeking = false;
  bool mDoneVideoSeeking = false;
  MozPromiseRequestHolder<WaitForDataPromise> mWaitRequest;

  // This temporarily stores the first frame we decode after we seek.
  // This is so that if we hit end of stream while we're decoding to reach
  // the seek target, we will still have a frame that we can display as the
  // last frame in the media.
  RefPtr<VideoData> mFirstVideoFrameAfterSeek;

  virtual media::TimeUnit GetSeekTarget() const {
    return mSeekJob.mTarget->GetTime();
  }
};
/**
 * Remove samples from the queue until aCompare() returns false.
 * aCompare A function object with the signature bool(int64_t) which returns
 *          true for samples that should be removed.
 */
template <typename Type, typename Function>
static void DiscardFrames(MediaQueue<Type>& aQueue, const Function& aCompare) {
  while (aQueue.GetSize() > 0) {
    if (aCompare(aQueue.PeekFront()->mTime.ToMicroseconds())) {
      RefPtr<Type> releaseMe = aQueue.PopFront();
      continue;
    }
    break;
  }
}
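
// Usage example: drop every queued video frame at or before `currentTime`,
// which is exactly how NextFrameSeekingState::DoSeek() uses it below:
//
//   DiscardFrames(VideoQueue(), [currentTime](int64_t aSampleTime) {
//     return aSampleTime <= currentTime.ToMicroseconds();
//   });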
class MediaDecoderStateMachine::NextFrameSeekingState
    : public MediaDecoderStateMachine::SeekingState {
 public:
  explicit NextFrameSeekingState(Master* aPtr) : SeekingState(aPtr) {}

  State GetState() const override {
    return DECODER_STATE_SEEKING_NEXTFRAMESEEKING;
  }

  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                          EventVisibility aVisibility) {
    MOZ_ASSERT(aSeekJob.mTarget->IsNextFrame());
    mCurrentTime = mMaster->GetMediaTime();
    mDuration = mMaster->Duration();
    return SeekingState::Enter(std::move(aSeekJob), aVisibility);
  }

  void Exit() override {
    // Disconnect my async seek operation.
    if (mAsyncSeekTask) {
      mAsyncSeekTask->Cancel();
    }

    // Disconnect MediaDecoder.
    mSeekJob.RejectIfExists(__func__);
  }
  void HandleAudioDecoded(AudioData* aAudio) override {
    mMaster->PushAudio(aAudio);
  }

  void HandleVideoDecoded(VideoData* aVideo) override {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());

    if (aVideo->mTime > mCurrentTime) {
      mMaster->PushVideo(aVideo);
      FinishSeek();
    } else {
      RequestVideoData();
    }
  }

  void HandleWaitingForAudio() override {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    // We don't care about audio decode errors in this state; they will be
    // handled by other states after seeking.
  }

  void HandleAudioCanceled() override {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    // We don't care about audio decode errors in this state; they will be
    // handled by other states after seeking.
  }

  void HandleEndOfAudio() override {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    // We don't care about audio decode errors in this state; they will be
    // handled by other states after seeking.
  }

  void HandleWaitingForVideo() override {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());
    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
  }

  void HandleVideoCanceled() override {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());
    RequestVideoData();
  }

  void HandleEndOfVideo() override {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());
    VideoQueue().Finish();
    FinishSeek();
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    // We don't care about audio in this state.
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    MOZ_ASSERT(!mSeekJob.mPromise.IsEmpty(), "Seek shouldn't be finished");
    MOZ_ASSERT(NeedMoreVideo());
    RequestVideoData();
  }
  TimeUnit CalculateNewCurrentTime() const override {
    // The HTMLMediaElement.currentTime should be updated to the seek target,
    // which has been updated to the next frame's time.
    return mSeekJob.mTarget->GetTime();
  }

  void DoSeek() override {
    mMaster->StopMediaSink();

    auto currentTime = mCurrentTime;
    DiscardFrames(VideoQueue(), [currentTime](int64_t aSampleTime) {
      return aSampleTime <= currentTime.ToMicroseconds();
    });

    // If there is a pending video request, finish the seeking if we don't need
    // more data, or wait for HandleVideoDecoded() to finish seeking.
    if (mMaster->IsRequestingVideoData()) {
      if (!NeedMoreVideo()) {
        FinishSeek();
      }
      return;
    }

    // Otherwise, we need to do the seek operation asynchronously for a special
    // case (a video which has no data at all) where the 1st seekToNextFrame()
    // operation reaches the end of the media. If we did the seek operation
    // synchronously, we would immediately resolve the SeekPromise in mSeekJob
    // and then switch to the CompletedState, which dispatches an "ended"
    // event. However, the ThenValue of the SeekPromise has not yet been set,
    // so the promise resolution is postponed and the JS developer receives the
    // "ended" event before the seek promise is resolved. An asynchronous seek
    // operation helps to solve this issue: by the time the seek is actually
    // performed, the ThenValue of the SeekPromise has already been set, so it
    // won't be postponed.
    RefPtr<Runnable> r = mAsyncSeekTask = new AysncNextFrameSeekTask(this);
    nsresult rv = OwnerThread()->Dispatch(r.forget());
    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
    Unused << rv;
  }
  void DoSeekInternal() {
    // We don't need to discard frames up to mCurrentTime here because we have
    // already done so in DoSeek(), and any video data received in between
    // either finishes the seek operation or is discarded; see
    // HandleVideoDecoded().

    if (!NeedMoreVideo()) {
      FinishSeek();
    } else if (!mMaster->IsTrackingVideoData()) {
      RequestVideoData();
    }
  }
  class AysncNextFrameSeekTask : public Runnable {
   public:
    explicit AysncNextFrameSeekTask(NextFrameSeekingState* aStateObject)
        : Runnable(
              "MediaDecoderStateMachine::NextFrameSeekingState::"
              "AysncNextFrameSeekTask"),
          mStateObj(aStateObject) {}

    void Cancel() { mStateObj = nullptr; }

    NS_IMETHOD Run() override {
      if (mStateObj) {
        AUTO_PROFILER_LABEL("AysncNextFrameSeekTask::Run", MEDIA_PLAYBACK);
        mStateObj->DoSeekInternal();
      }
      return NS_OK;
    }

   private:
    NextFrameSeekingState* mStateObj;
  };
  void RequestVideoData() { mMaster->RequestVideoData(media::TimeUnit()); }

  bool NeedMoreVideo() const {
    // Need to request video when we have none and the video queue is not
    // finished.
    return VideoQueue().GetSize() == 0 && !VideoQueue().IsFinished();
  }

  // Update the seek target's time before resolving this seek task; the
  // updated time will be used in MDSM::SeekCompleted() to update the MDSM's
  // position.
  void UpdateSeekTargetTime() {
    RefPtr<VideoData> data = VideoQueue().PeekFront();
    if (data) {
      mSeekJob.mTarget->SetTime(data->mTime);
    } else {
      MOZ_ASSERT(VideoQueue().AtEndOfStream());
      mSeekJob.mTarget->SetTime(mDuration);
    }
  }

  void FinishSeek() {
    MOZ_ASSERT(!NeedMoreVideo());
    UpdateSeekTargetTime();
    auto time = mSeekJob.mTarget->GetTime().ToMicroseconds();
    DiscardFrames(AudioQueue(),
                  [time](int64_t aSampleTime) { return aSampleTime < time; });
    SeekCompleted();
  }

  TimeUnit mCurrentTime;
  TimeUnit mDuration;
  RefPtr<AysncNextFrameSeekTask> mAsyncSeekTask;
};
class MediaDecoderStateMachine::NextFrameSeekingFromDormantState
    : public MediaDecoderStateMachine::AccurateSeekingState {
 public:
  explicit NextFrameSeekingFromDormantState(Master* aPtr)
      : AccurateSeekingState(aPtr) {}

  State GetState() const override { return DECODER_STATE_SEEKING_FROMDORMANT; }

  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aCurrentSeekJob,
                                          SeekJob&& aFutureSeekJob) {
    mFutureSeekJob = std::move(aFutureSeekJob);

    AccurateSeekingState::Enter(std::move(aCurrentSeekJob),
                                EventVisibility::Suppressed);

    // Once seekToNextFrame() is called, we assume the user is likely to keep
    // calling seekToNextFrame() repeatedly, and so, we should prevent the MDSM
    // from getting into Dormant state.
    mMaster->mMinimizePreroll = false;

    return mFutureSeekJob.mPromise.Ensure(__func__);
  }

  void Exit() override {
    mFutureSeekJob.RejectIfExists(__func__);
    AccurateSeekingState::Exit();
  }

 private:
  SeekJob mFutureSeekJob;

  // We don't want to transition to DecodingState once this seek completes;
  // instead, we transition to NextFrameSeekingState.
  void GoToNextState() override {
    SetState<NextFrameSeekingState>(std::move(mFutureSeekJob),
                                    EventVisibility::Observable);
  }
};
class MediaDecoderStateMachine::VideoOnlySeekingState
    : public MediaDecoderStateMachine::AccurateSeekingState {
 public:
  explicit VideoOnlySeekingState(Master* aPtr) : AccurateSeekingState(aPtr) {}

  State GetState() const override { return DECODER_STATE_SEEKING_VIDEOONLY; }

  RefPtr<MediaDecoder::SeekPromise> Enter(SeekJob&& aSeekJob,
                                          EventVisibility aVisibility) {
    MOZ_ASSERT(aSeekJob.mTarget->IsVideoOnly());
    MOZ_ASSERT(aVisibility == EventVisibility::Suppressed);

    RefPtr<MediaDecoder::SeekPromise> p =
        AccurateSeekingState::Enter(std::move(aSeekJob), aVisibility);

    // Dispatch a mozvideoonlyseekbegin event so the UI can react to the
    // corresponding changes.
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::VideoOnlySeekBegin);

    return p;
  }

  void Exit() override {
    // We are completing or discarding this video-only seek operation now, so
    // dispatch an event so that the UI can change in response to the end of
    // the video-only seek.
    mMaster->mOnPlaybackEvent.Notify(
        MediaPlaybackEvent::VideoOnlySeekCompleted);

    AccurateSeekingState::Exit();
  }

  void HandleAudioDecoded(AudioData* aAudio) override {
    MOZ_ASSERT(mDoneAudioSeeking && !mDoneVideoSeeking,
               "Seek shouldn't be finished");

    // Video-only seek doesn't reset the audio decoder. There might be pending
    // audio requests when AccurateSeekTask::Seek() begins. We will just store
    // the data without checking |mDiscontinuity| or calling
    // DropAudioUpToSeekTarget().
    mMaster->PushAudio(aAudio);
  }

  void HandleWaitingForAudio() override {}

  void HandleAudioCanceled() override {}

  void HandleEndOfAudio() override {}

  void HandleAudioWaited(MediaData::Type aType) override {
    MOZ_ASSERT(!mDoneAudioSeeking || !mDoneVideoSeeking,
               "Seek shouldn't be finished");

    // Ignore pending requests from the video-only seek.
  }
  void DoSeek() override {
    // TODO: keep decoding audio.
    mDoneAudioSeeking = true;
    mDoneVideoSeeking = !Info().HasVideo();

    const auto offset = VideoQueue().GetOffset();
    mMaster->ResetDecode(TrackInfo::kVideoTrack);

    // We are entering the video-only state and have looped at least once
    // before, so we need to set the offset in order to let new video frames
    // catch up with the clock time.
    if (offset != media::TimeUnit::Zero()) {
      VideoQueue().SetOffset(offset);
    }

    DemuxerSeek();
  }

  // Allow skip-to-next-key-frame to kick in if we fall behind the current
  // playback position, so decoding has a better chance to catch up.
  void RequestVideoData() override {
    MOZ_ASSERT(!mDoneVideoSeeking);

    auto clock = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock()
                                                  : mMaster->GetMediaTime();
    mMaster->AdjustByLooping(clock);
    const auto& nextKeyFrameTime = GetNextKeyFrameTime();

    auto threshold = clock;

    if (nextKeyFrameTime.IsValid() &&
        clock >= (nextKeyFrameTime - sSkipToNextKeyFrameThreshold)) {
      threshold = nextKeyFrameTime;
    }

    mMaster->RequestVideoData(threshold);
  }

  // Trigger skip to next key frame if the current playback position is very
  // close to the next key frame's time.
  static constexpr TimeUnit sSkipToNextKeyFrameThreshold =
      TimeUnit::FromMicroseconds(5000);
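
  // For example, if the adjusted clock is at 2.0970s and the next keyframe is
  // at 2.1000s, the clock is within the 5ms threshold, so the request
  // threshold is bumped to 2.1000s and the reader may skip straight to that
  // keyframe.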
  // If the media is playing, drop video until it catches up to the playback
  // position.
  media::TimeUnit GetSeekTarget() const override {
    auto target = mMaster->mMediaSink->IsStarted()
                      ? mMaster->GetClock()
                      : mSeekJob.mTarget->GetTime();
    mMaster->AdjustByLooping(target);
    return target;
  }

  media::TimeUnit GetNextKeyFrameTime() const {
    // We only call this method in RequestVideoData(), and we only request
    // video data if we haven't done video seeking.
    MOZ_DIAGNOSTIC_ASSERT(!mDoneVideoSeeking);
    MOZ_DIAGNOSTIC_ASSERT(mMaster->VideoQueue().GetSize() == 0);

    if (mFirstVideoFrameAfterSeek) {
      return mFirstVideoFrameAfterSeek->NextKeyFrameTime();
    }

    return TimeUnit::Invalid();
  }
};

constexpr TimeUnit MediaDecoderStateMachine::VideoOnlySeekingState::
    sSkipToNextKeyFrameThreshold;
RefPtr<MediaDecoder::SeekPromise>
MediaDecoderStateMachine::DormantState::HandleSeek(const SeekTarget& aTarget) {
  if (aTarget.IsNextFrame()) {
    // NextFrameSeekingState doesn't reset the decoder, unlike
    // AccurateSeekingState. So we must first come out of dormant by seeking to
    // mPendingSeek and continue later with the NextFrameSeek.
    SLOG("Changed state to SEEKING (to %" PRId64 ")",
         aTarget.GetTime().ToMicroseconds());
    SeekJob seekJob;
    seekJob.mTarget = Some(aTarget);
    return StateObject::SetState<NextFrameSeekingFromDormantState>(
        std::move(mPendingSeek), std::move(seekJob));
  }

  return StateObject::HandleSeek(aTarget);
}
/**
 * Purpose: stop playback until enough data is decoded to continue playback.
 *
 * Transition to:
 *   SEEKING if any seek request.
 *   SHUTDOWN if any decode error.
 *   COMPLETED when having decoded all audio/video data.
 *   DECODING/LOOPING_DECODING when having decoded enough data to continue
 *   playback.
 */
class MediaDecoderStateMachine::BufferingState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit BufferingState(Master* aPtr) : StateObject(aPtr) {}

  void Enter() {
    if (mMaster->IsPlaying()) {
      mMaster->StopPlayback();
    }

    mBufferingStart = TimeStamp::Now();
    // Playback is now stopped, so there is no need to connect to the queues'
    // PopFrontEvent()s, but frames may have been recently popped before the
    // transition from DECODING.
    if (mMaster->IsAudioDecoding() && !mMaster->HaveEnoughDecodedAudio() &&
        !mMaster->IsTrackingAudioData()) {
      mMaster->RequestAudioData();
    }
    if (mMaster->IsVideoDecoding() && !mMaster->HaveEnoughDecodedVideo() &&
        !mMaster->IsTrackingVideoData()) {
      mMaster->RequestVideoData(TimeUnit());
    }

    mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S));
    mMaster->mOnNextFrameStatus.Notify(
        MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING);
  }

  void Step() override;

  State GetState() const override { return DECODER_STATE_BUFFERING; }

  void HandleAudioDecoded(AudioData* aAudio) override {
    mMaster->PushAudio(aAudio);
    if (!mMaster->HaveEnoughDecodedAudio()) {
      mMaster->RequestAudioData();
    }
    // This might be the sample we need to exit buffering.
    // Schedule Step() to check it.
    mMaster->ScheduleStateMachine();
  }

  void HandleVideoDecoded(VideoData* aVideo) override {
    mMaster->PushVideo(aVideo);
    if (!mMaster->HaveEnoughDecodedVideo()) {
      mMaster->RequestVideoData(media::TimeUnit());
    }
    // This might be the sample we need to exit buffering.
    // Schedule Step() to check it.
    mMaster->ScheduleStateMachine();
  }

  void HandleAudioCanceled() override { mMaster->RequestAudioData(); }

  void HandleVideoCanceled() override {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleWaitingForAudio() override {
    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
  }

  void HandleWaitingForVideo() override {
    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    mMaster->RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleEndOfAudio() override;
  void HandleEndOfVideo() override;

  void HandleVideoSuspendTimeout() override {
    // No video, so nothing to suspend.
    if (!mMaster->HasVideo()) {
      return;
    }

    mMaster->mVideoDecodeSuspended = true;
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend);
    Reader()->SetVideoBlankDecode(true);
  }

 private:
  TimeStamp mBufferingStart;

  // The maximum number of seconds we spend buffering when we are short on
  // buffered data.
  const uint32_t mBufferingWait = 15;
};
/**
 * Purpose: play all the decoded data and fire the 'ended' event.
 *
 * Transition to:
 *   SEEKING if any seek request.
 *   LOOPING_DECODING if the MDSM has enabled looping.
 */
class MediaDecoderStateMachine::CompletedState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit CompletedState(Master* aPtr) : StateObject(aPtr) {}

  void Enter() {
    // On Android, the life cycle of a graphic buffer is tied to Android's
    // codec, so we can't release it if we still need to render the frame.
#ifndef MOZ_WIDGET_ANDROID
    if (!mMaster->mLooping) {
      // We've decoded all samples.
      // We don't need decoders anymore if not looping.
      Reader()->ReleaseResources();
    }
#endif
    bool hasNextFrame = (!mMaster->HasAudio() || !mMaster->mAudioCompleted) &&
                        (!mMaster->HasVideo() || !mMaster->mVideoCompleted);

    mMaster->mOnNextFrameStatus.Notify(
        hasNextFrame ? MediaDecoderOwner::NEXT_FRAME_AVAILABLE
                     : MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);
  }

  void Exit() override { mSentPlaybackEndedEvent = false; }

  void Step() override {
    if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
        mMaster->IsPlaying()) {
      mMaster->StopPlayback();
    }

    // Play the remaining media. We want to run AdvanceFrame() at least
    // once to ensure the current playback position is advanced to the
    // end of the media, and so that we update the readyState.
    if ((mMaster->HasVideo() && !mMaster->mVideoCompleted) ||
        (mMaster->HasAudio() && !mMaster->mAudioCompleted)) {
      // Start playback if necessary to play the remaining media.
      mMaster->MaybeStartPlayback();
      mMaster->UpdatePlaybackPositionPeriodically();
      MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
                 "Must have timer scheduled");
      return;
    }

    // StopPlayback in order to reset the IsPlaying() state so audio
    // is restarted correctly.
    mMaster->StopPlayback();

    if (!mSentPlaybackEndedEvent) {
      auto clockTime =
          std::max(mMaster->AudioEndTime(), mMaster->VideoEndTime());
      // Correct the time over the end once looping was turned on.
      mMaster->AdjustByLooping(clockTime);
      if (mMaster->mDuration.Ref()->IsInfinite()) {
        // We have a finite duration when playback reaches the end.
        mMaster->mDuration = Some(clockTime);
        DDLOGEX(mMaster, DDLogCategory::Property, "duration_us",
                mMaster->mDuration.Ref()->ToMicroseconds());
      }
      mMaster->UpdatePlaybackPosition(clockTime);

      // Ensure readyState is updated before firing the 'ended' event.
      mMaster->mOnNextFrameStatus.Notify(
          MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE);

      mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::PlaybackEnded);

      mSentPlaybackEndedEvent = true;

      // MediaSink::GetEndTime() must be called before stopping playback.
      mMaster->StopMediaSink();
    }
  }

  State GetState() const override { return DECODER_STATE_COMPLETED; }

  void HandleLoopingChanged() override {
    if (mMaster->mLooping) {
      SetDecodingState();
    }
  }

  void HandleAudioCaptured() override {
    // MediaSink is changed. Schedule Step() to check if we can start playback.
    mMaster->ScheduleStateMachine();
  }

  void HandleVideoSuspendTimeout() override {
    // Do nothing since no decoding is going on.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // Resume the video decoder and seek to the last video frame.
    // This triggers a video-only seek which won't update the playback
    // position.
    auto target = mMaster->mDecodedVideoEndTime;
    mMaster->AdjustByLooping(target);
    StateObject::HandleResumeVideoDecoding(target);
  }

  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override {
    if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
      // Schedule Step() to check if we can start playback.
      mMaster->ScheduleStateMachine();
    }
  }

 private:
  bool mSentPlaybackEndedEvent = false;
};
/**
 * Purpose: release all resources allocated by MDSM.
 *
 * Transition to:
 *   None since this is the final state.
 *
 * Transition from:
 *   Any states other than SHUTDOWN.
 */
class MediaDecoderStateMachine::ShutdownState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit ShutdownState(Master* aPtr) : StateObject(aPtr) {}

  RefPtr<ShutdownPromise> Enter();

  void Exit() override {
    MOZ_DIAGNOSTIC_CRASH("Shouldn't escape the SHUTDOWN state.");
  }

  State GetState() const override { return DECODER_STATE_SHUTDOWN; }

  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override {
    MOZ_DIAGNOSTIC_CRASH("Can't seek in shutdown state.");
    return MediaDecoder::SeekPromise::CreateAndReject(true, __func__);
  }

  RefPtr<ShutdownPromise> HandleShutdown() override {
    MOZ_DIAGNOSTIC_CRASH("Already shutting down.");
    return nullptr;
  }

  void HandleVideoSuspendTimeout() override {
    MOZ_DIAGNOSTIC_CRASH("Already shutting down.");
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    MOZ_DIAGNOSTIC_CRASH("Already shutting down.");
  }
};
RefPtr<MediaDecoder::SeekPromise>
MediaDecoderStateMachine::StateObject::HandleSeek(const SeekTarget& aTarget) {
  SLOG("Changed state to SEEKING (to %" PRId64 ")",
       aTarget.GetTime().ToMicroseconds());
  SeekJob seekJob;
  seekJob.mTarget = Some(aTarget);
  return SetSeekingState(std::move(seekJob), EventVisibility::Observable);
}

RefPtr<ShutdownPromise>
MediaDecoderStateMachine::StateObject::HandleShutdown() {
  return SetState<ShutdownState>();
}

void MediaDecoderStateMachine::StateObject::HandleResumeVideoDecoding(
    const TimeUnit& aTarget) {
  MOZ_ASSERT(mMaster->mVideoDecodeSuspended);

  mMaster->mVideoDecodeSuspended = false;
  mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::ExitVideoSuspend);
  Reader()->SetVideoBlankDecode(false);

  // Start a video-only seek to the current time.
  SeekJob seekJob;

  // We use fastSeek to optimize the resuming time.
  // FastSeek is only used for video-only media since we don't need to worry
  // about A/V sync.
  // Don't use fastSeek if we want to seek to the end, because it might seek to
  // a keyframe before the last frame (if the last frame itself is not a
  // keyframe), and we always want to present the final frame to the user when
  // seeking to the end.
  const auto type = mMaster->HasAudio() || aTarget == mMaster->Duration()
                        ? SeekTarget::Type::Accurate
                        : SeekTarget::Type::PrevSyncPoint;

  seekJob.mTarget.emplace(aTarget, type, SeekTarget::Track::VideoOnly);
  SLOG("video-only seek target=%" PRId64 ", current time=%" PRId64,
       aTarget.ToMicroseconds(), mMaster->GetMediaTime().ToMicroseconds());

  SetSeekingState(std::move(seekJob), EventVisibility::Suppressed);
}
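
// For example, resuming a video-only (muted) element in the middle of
// playback picks PrevSyncPoint, since a keyframe near the target is good
// enough, whereas resuming at the very end (aTarget == Duration()) or with
// audio present picks Accurate so the exact final frame is shown and A/V
// stays in sync.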
RefPtr<MediaDecoder::SeekPromise>
MediaDecoderStateMachine::StateObject::SetSeekingState(
    SeekJob&& aSeekJob, EventVisibility aVisibility) {
  if (aSeekJob.mTarget->IsAccurate() || aSeekJob.mTarget->IsFast()) {
    if (aSeekJob.mTarget->IsVideoOnly()) {
      return SetState<VideoOnlySeekingState>(std::move(aSeekJob), aVisibility);
    }
    return SetState<AccurateSeekingState>(std::move(aSeekJob), aVisibility);
  }

  if (aSeekJob.mTarget->IsNextFrame()) {
    return SetState<NextFrameSeekingState>(std::move(aSeekJob), aVisibility);
  }

  MOZ_ASSERT_UNREACHABLE("Unknown SeekTarget::Type.");
  return nullptr;
}

void MediaDecoderStateMachine::StateObject::SetDecodingState() {
  if (mMaster->IsInSeamlessLooping()) {
    SetState<LoopingDecodingState>();
    return;
  }
  SetState<DecodingState>();
}
void MediaDecoderStateMachine::DecodeMetadataState::OnMetadataRead(
    MetadataHolder&& aMetadata) {
  mMetadataRequest.Complete();

  AUTO_PROFILER_LABEL("DecodeMetadataState::OnMetadataRead", MEDIA_PLAYBACK);
  mMaster->mInfo.emplace(*aMetadata.mInfo);
  mMaster->mMediaSeekable = Info().mMediaSeekable;
  mMaster->mMediaSeekableOnlyInBufferedRanges =
      Info().mMediaSeekableOnlyInBufferedRanges;

  if (Info().mMetadataDuration.isSome()) {
    mMaster->mDuration = Info().mMetadataDuration;
  } else if (Info().mUnadjustedMetadataEndTime.isSome()) {
    const TimeUnit unadjusted = Info().mUnadjustedMetadataEndTime.ref();
    const TimeUnit adjustment = Info().mStartTime;
    SLOG("No metadata duration, calculate one. unadjusted=%" PRId64
         ", adjustment=%" PRId64,
         unadjusted.ToMicroseconds(), adjustment.ToMicroseconds());
    mMaster->mInfo->mMetadataDuration.emplace(unadjusted - adjustment);
    mMaster->mDuration = Info().mMetadataDuration;
  }

  // If we don't know the duration by this point, we assume infinity, per spec.
  if (mMaster->mDuration.Ref().isNothing()) {
    mMaster->mDuration = Some(TimeUnit::FromInfinity());
  }

  DDLOGEX(mMaster, DDLogCategory::Property, "duration_us",
          mMaster->mDuration.Ref()->ToMicroseconds());

  if (mMaster->HasVideo()) {
    SLOG("Video decode HWAccel=%d videoQueueSize=%d",
         Reader()->VideoIsHardwareAccelerated(),
         mMaster->GetAmpleVideoFrames());
  }

  MOZ_ASSERT(mMaster->mDuration.Ref().isSome());
  SLOG("OnMetadataRead, duration=%" PRId64,
       mMaster->mDuration.Ref()->ToMicroseconds());

  mMaster->mMetadataLoadedEvent.Notify(std::move(aMetadata.mInfo),
                                       std::move(aMetadata.mTags),
                                       MediaDecoderEventVisibility::Observable);

  // Check whether the media satisfies the requirement of seamless looping.
  // TODO: after we ensure video seamless looping is stable enough, we can
  // remove this to make the condition always true.
  mMaster->mSeamlessLoopingAllowed = StaticPrefs::media_seamless_looping();
  if (mMaster->HasVideo()) {
    mMaster->mSeamlessLoopingAllowed =
        StaticPrefs::media_seamless_looping_video();
  }

  SetState<DecodingFirstFrameState>();
}

void MediaDecoderStateMachine::DormantState::HandlePlayStateChanged(
    MediaDecoder::PlayState aPlayState) {
  if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
    // Exit dormant when the user wants to play.
    MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);
    SetSeekingState(std::move(mPendingSeek), EventVisibility::Suppressed);
  }
}

void MediaDecoderStateMachine::DecodingFirstFrameState::Enter() {
  // Transition to DECODING if we've already decoded the first frames.
  if (mMaster->mSentFirstFrameLoadedEvent) {
    SetDecodingState();
    return;
  }

  MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);

  // Dispatch tasks to decode first frames.
  if (mMaster->HasAudio()) {
    mMaster->RequestAudioData();
  }
  if (mMaster->HasVideo()) {
    mMaster->RequestVideoData(media::TimeUnit());
  }
}

void MediaDecoderStateMachine::DecodingFirstFrameState::
    MaybeFinishDecodeFirstFrame() {
  MOZ_ASSERT(!mMaster->mSentFirstFrameLoadedEvent);

  if ((mMaster->IsAudioDecoding() && AudioQueue().GetSize() == 0) ||
      (mMaster->IsVideoDecoding() && VideoQueue().GetSize() == 0)) {
    return;
  }

  mMaster->FinishDecodeFirstFrame();
  if (mPendingSeek.Exists()) {
    SetSeekingState(std::move(mPendingSeek), EventVisibility::Observable);
  } else {
    SetDecodingState();
  }
}
void MediaDecoderStateMachine::DecodingState::Enter() {
  MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);

  if (mMaster->mVideoDecodeSuspended &&
      mMaster->mVideoDecodeMode == VideoDecodeMode::Normal) {
    StateObject::HandleResumeVideoDecoding(mMaster->GetMediaTime());
    return;
  }

  if (mMaster->mVideoDecodeMode == VideoDecodeMode::Suspend &&
      !mMaster->mVideoDecodeSuspendTimer.IsScheduled() &&
      !mMaster->mVideoDecodeSuspended) {
    // If the VideoDecodeMode is Suspend and the timer is not scheduled, it
    // means the timer has timed out and we should suspend video decoding now
    // if necessary.
    HandleVideoSuspendTimeout();
  }

  // If we're in the normal decoding mode and the decoding has finished, then
  // we should go to the `completed` state, because we don't need to decode
  // anything later. However, if we're in the seamless looping mode, we will
  // restart decoding ASAP, so we can stay in the `decoding` state.
  if (!mMaster->IsVideoDecoding() && !mMaster->IsAudioDecoding() &&
      !mMaster->IsInSeamlessLooping()) {
    SetState<CompletedState>();
    return;
  }

  mOnAudioPopped =
      AudioQueue().PopFrontEvent().Connect(OwnerThread(), [this]() {
        AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnAudioPopped",
                            MEDIA_PLAYBACK);
        if (mMaster->IsAudioDecoding() && !mMaster->HaveEnoughDecodedAudio()) {
          EnsureAudioDecodeTaskQueued();
        }
      });
  mOnVideoPopped =
      VideoQueue().PopFrontEvent().Connect(OwnerThread(), [this]() {
        AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnVideoPopped",
                            MEDIA_PLAYBACK);
        if (mMaster->IsVideoDecoding() && !mMaster->HaveEnoughDecodedVideo()) {
          EnsureVideoDecodeTaskQueued();
        }
      });

  mMaster->mOnNextFrameStatus.Notify(MediaDecoderOwner::NEXT_FRAME_AVAILABLE);

  mDecodeStartTime = TimeStamp::Now();

  MaybeStopPrerolling();

  // Ensure that we've got tasks enqueued to decode data if we need to.
  DispatchDecodeTasksIfNeeded();

  mMaster->ScheduleStateMachine();

  // Will enter dormant when playback is paused for a while.
  if (mMaster->mPlayState == MediaDecoder::PLAY_STATE_PAUSED) {
    StartDormantTimer();
  }
}
void MediaDecoderStateMachine::DecodingState::Step() {
  if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING &&
      mMaster->IsPlaying()) {
    // We're playing, but the element/decoder is in paused state. Stop
    // playing!
    mMaster->StopPlayback();
  }

  // Start playback if necessary so that the clock can be properly queried.
  if (!mIsPrerolling) {
    mMaster->MaybeStartPlayback();
  }

  mMaster->UpdatePlaybackPositionPeriodically();
  MOZ_ASSERT(!mMaster->IsPlaying() || mMaster->IsStateMachineScheduled(),
             "Must have timer scheduled");
  if (IsBufferingAllowed()) {
    MaybeStartBuffering();
  }
}
void MediaDecoderStateMachine::DecodingState::HandleEndOfAudio() {
  AudioQueue().Finish();
  if (!mMaster->IsVideoDecoding()) {
    SetState<CompletedState>();
    return;
  }
  MaybeStopPrerolling();
}

void MediaDecoderStateMachine::DecodingState::HandleEndOfVideo() {
  VideoQueue().Finish();
  if (!mMaster->IsAudioDecoding()) {
    SetState<CompletedState>();
    return;
  }
  MaybeStopPrerolling();
}
void MediaDecoderStateMachine::DecodingState::DispatchDecodeTasksIfNeeded() {
  if (mMaster->IsAudioDecoding() && !mMaster->mMinimizePreroll &&
      !mMaster->HaveEnoughDecodedAudio()) {
    EnsureAudioDecodeTaskQueued();
  }

  if (mMaster->IsVideoDecoding() && !mMaster->mMinimizePreroll &&
      !mMaster->HaveEnoughDecodedVideo()) {
    EnsureVideoDecodeTaskQueued();
  }
}

void MediaDecoderStateMachine::DecodingState::EnsureAudioDecodeTaskQueued() {
  if (!mMaster->IsAudioDecoding() || mMaster->IsTrackingAudioData()) {
    return;
  }
  mMaster->RequestAudioData();
}

void MediaDecoderStateMachine::DecodingState::EnsureVideoDecodeTaskQueued() {
  if (!mMaster->IsVideoDecoding() || mMaster->IsTrackingVideoData()) {
    return;
  }
  mMaster->RequestVideoData(mMaster->GetMediaTime(),
                            ShouldRequestNextKeyFrame());
}
void MediaDecoderStateMachine::DecodingState::MaybeStartBuffering() {
  // Buffering makes sense only after decoding the first frames.
  MOZ_ASSERT(mMaster->mSentFirstFrameLoadedEvent);

  // Don't enter buffering when MediaDecoder is not playing.
  if (mMaster->mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
    return;
  }

  // Don't enter buffering while prerolling so that the decoder has a chance to
  // enqueue some decoded data before we give up and start buffering.
  if (!mMaster->IsPlaying()) {
    return;
  }

  // Note we could have a wait promise pending when playing non-MSE EME.
  if (mMaster->OutOfDecodedAudio() && mMaster->IsWaitingAudioData()) {
    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
                         "OutOfDecodedAudio");
    SLOG("Enter buffering due to out of decoded audio");
    SetState<BufferingState>();
    return;
  }
  if (mMaster->OutOfDecodedVideo() && mMaster->IsWaitingVideoData()) {
    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
                         "OutOfDecodedVideo");
    SLOG("Enter buffering due to out of decoded video");
    SetState<BufferingState>();
    return;
  }

  if (Reader()->UseBufferingHeuristics() && mMaster->HasLowDecodedData() &&
      mMaster->HasLowBufferedData() && !mMaster->mCanPlayThrough) {
    PROFILER_MARKER_TEXT("MDSM::StartBuffering", MEDIA_PLAYBACK, {},
                         "BufferingHeuristics");
    SLOG("Enter buffering due to buffering heuristics");
    SetState<BufferingState>();
  }
}
void MediaDecoderStateMachine::LoopingDecodingState::HandleError(
    const MediaResult& aError, bool aIsAudio) {
  SLOG("%s looping failed, aError=%s", aIsAudio ? "audio" : "video",
       aError.ErrorName().get());
  switch (aError.Code()) {
    case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
      if (aIsAudio) {
        HandleWaitingForAudio();
      } else {
        HandleWaitingForVideo();
      }
      break;
    case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
      // This could happen after either the resource has been closed, or the
      // data hasn't been appended in MSE, so we won't be able to get any
      // sample and need to fall back to normal looping.
      if (mIsReachingAudioEOS && mIsReachingVideoEOS) {
        SetState<CompletedState>();
      }
      break;
    default:
      mMaster->DecodeError(aError);
      break;
  }
}
void MediaDecoderStateMachine::SeekingState::SeekCompleted() {
  const auto newCurrentTime = CalculateNewCurrentTime();

  if ((newCurrentTime == mMaster->Duration() ||
       newCurrentTime.EqualsAtLowestResolution(
           mMaster->Duration().ToBase(USECS_PER_S))) &&
      !mMaster->IsLiveStream()) {
    SLOG("Seek completed, seeked to end: %s", newCurrentTime.ToString().get());
    // We will transition to COMPLETED immediately. Note we don't do this when
    // playing a live stream, since the end of media will advance once we
    // download more data!
    AudioQueue().Finish();
    VideoQueue().Finish();

    // We won't start MediaSink when paused. m{Audio,Video}Completed will
    // remain false and 'playbackEnded' won't be notified. Therefore we
    // need to set these flags explicitly when seeking to the end.
    mMaster->mAudioCompleted = true;
    mMaster->mVideoCompleted = true;

    // There might still be a pending audio request when doing a video-only or
    // next-frame seek. Discard it so we won't break the invariants of the
    // COMPLETED state by adding audio samples to a finished queue.
    mMaster->mAudioDataRequest.DisconnectIfExists();
  }

  // We want to resolve the seek request prior to finishing the first frame
  // to ensure that the 'seeked' event is fired prior to 'loadeddata'.
  // Note: SeekJob.Resolve() resets SeekJob.mTarget. Don't use mSeekJob anymore
  // hereafter.
  mSeekJob.Resolve(__func__);

  // Notify FirstFrameLoaded now if we haven't, since we've decoded some data
  // for readyState to transition to HAVE_CURRENT_DATA and fire 'loadeddata'.
  if (!mMaster->mSentFirstFrameLoadedEvent) {
    mMaster->FinishDecodeFirstFrame();
  }

  // Ensure timestamps are up to date.
  // Suppressed visibility comes from two cases: (1) leaving dormant state,
  // and (2) resuming a suspended video decoder. We want both cases to be
  // transparent to the user, so we only notify the change when the seek
  // request is from the user.
  if (mVisibility == EventVisibility::Observable) {
    // Don't update the playback position for a video-only seek.
    // Otherwise we might have |newCurrentTime > mMediaSink->GetPosition()|
    // and fail the assertion in GetClock() since we didn't stop MediaSink.
    mMaster->UpdatePlaybackPositionInternal(newCurrentTime);
  }

  // Try to decode another frame to detect if we're at the end...
  SLOG("Seek completed, mCurrentPosition=%" PRId64,
       mMaster->mCurrentPosition.Ref().ToMicroseconds());

  if (mMaster->VideoQueue().PeekFront()) {
    mMaster->mMediaSink->Redraw(Info().mVideo);
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::Invalidate);
  }

  GoToNextState();
}
void MediaDecoderStateMachine::BufferingState::Step() {
  TimeStamp now = TimeStamp::Now();
  MOZ_ASSERT(!mBufferingStart.IsNull(), "Must know buffering start time.");

  if (Reader()->UseBufferingHeuristics()) {
    if (mMaster->IsWaitingAudioData() || mMaster->IsWaitingVideoData()) {
      // Can't exit buffering when we are still waiting for data.
      // Note we don't schedule the next loop because we will do that when the
      // wait promise is resolved.
      return;
    }
    // With buffering heuristics, we exit buffering state when we:
    // 1. can play through or
    // 2. time out (specified by mBufferingWait) or
    // 3. have enough buffered data.
    TimeDuration elapsed = now - mBufferingStart;
    TimeDuration timeout =
        TimeDuration::FromSeconds(mBufferingWait * mMaster->mPlaybackRate);
    bool stopBuffering =
        mMaster->mCanPlayThrough || elapsed >= timeout ||
        !mMaster->HasLowBufferedData(TimeUnit::FromSeconds(mBufferingWait));
    if (!stopBuffering) {
      SLOG("Buffering: wait %ds, timeout in %.3lfs", mBufferingWait,
           mBufferingWait - elapsed.ToSeconds());
      mMaster->ScheduleStateMachineIn(TimeUnit::FromMicroseconds(USECS_PER_S));
      return;
    }
  } else if (mMaster->OutOfDecodedAudio() || mMaster->OutOfDecodedVideo()) {
    MOZ_ASSERT(!mMaster->OutOfDecodedAudio() || mMaster->IsTrackingAudioData());
    MOZ_ASSERT(!mMaster->OutOfDecodedVideo() || mMaster->IsTrackingVideoData());
    SLOG(
        "In buffering mode, waiting to be notified: outOfAudio: %d, "
        "mAudioStatus: %s, outOfVideo: %d, mVideoStatus: %s",
        mMaster->OutOfDecodedAudio(), mMaster->AudioRequestStatus(),
        mMaster->OutOfDecodedVideo(), mMaster->VideoRequestStatus());
    return;
  }

  SLOG("Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
  mMaster->mTotalBufferingDuration += (now - mBufferingStart);
  SetDecodingState();
}
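
// For example, with mBufferingWait = 15 and a playback rate of 2.0, the
// heuristic timeout above is 30 wall-clock seconds; buffering also ends as
// soon as we can play through or no longer have low buffered data.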
void MediaDecoderStateMachine::BufferingState::HandleEndOfAudio() {
  AudioQueue().Finish();
  if (!mMaster->IsVideoDecoding()) {
    SetState<CompletedState>();
  } else {
    // Check if we can exit buffering.
    mMaster->ScheduleStateMachine();
  }
}

void MediaDecoderStateMachine::BufferingState::HandleEndOfVideo() {
  VideoQueue().Finish();
  if (!mMaster->IsAudioDecoding()) {
    SetState<CompletedState>();
  } else {
    // Check if we can exit buffering.
    mMaster->ScheduleStateMachine();
  }
}
RefPtr<ShutdownPromise> MediaDecoderStateMachine::ShutdownState::Enter() {
  auto* master = mMaster;

  master->mDelayedScheduler.Reset();

  // Shutdown happens while the decode timer is active; we need to disconnect
  // and dispose of the timer.
  master->CancelSuspendTimer();

  if (master->IsPlaying()) {
    master->StopPlayback();
  }

  master->mAudioDataRequest.DisconnectIfExists();
  master->mVideoDataRequest.DisconnectIfExists();
  master->mAudioWaitRequest.DisconnectIfExists();
  master->mVideoWaitRequest.DisconnectIfExists();

  // Resetting decode should be called after stopping the media sink, which
  // ensures that we have an empty media queue before seeking the demuxer.
  master->StopMediaSink();
  master->ResetDecode();
  master->mMediaSink->Shutdown();

  // Prevent dangling pointers by disconnecting the listeners.
  master->mAudioQueueListener.Disconnect();
  master->mVideoQueueListener.Disconnect();
  master->mMetadataManager.Disconnect();
  master->mOnMediaNotSeekable.Disconnect();
  master->mAudibleListener.DisconnectIfExists();

  // Disconnect canonicals and mirrors before shutting down our task queue.
  master->mStreamName.DisconnectIfConnected();
  master->mSinkDevice.DisconnectIfConnected();
  master->mOutputCaptureState.DisconnectIfConnected();
  master->mOutputDummyTrack.DisconnectIfConnected();
  master->mOutputTracks.DisconnectIfConnected();
  master->mOutputPrincipal.DisconnectIfConnected();

  master->mDuration.DisconnectAll();
  master->mCurrentPosition.DisconnectAll();
  master->mIsAudioDataAudible.DisconnectAll();

  // Shut down the watch manager to stop further notifications.
  master->mWatchManager.Shutdown();

  return Reader()->Shutdown()->Then(OwnerThread(), __func__, master,
                                    &MediaDecoderStateMachine::FinishShutdown,
                                    &MediaDecoderStateMachine::FinishShutdown);
}

#define INIT_WATCHABLE(name, val) name(val, "MediaDecoderStateMachine::" #name)
#define INIT_MIRROR(name, val) \
  name(mTaskQueue, val, "MediaDecoderStateMachine::" #name " (Mirror)")
#define INIT_CANONICAL(name, val) \
  name(mTaskQueue, val, "MediaDecoderStateMachine::" #name " (Canonical)")

MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
                                                   MediaFormatReader* aReader)
    : MediaDecoderStateMachineBase(aDecoder, aReader),
      mWatchManager(this, mTaskQueue),
      mDispatchedStateMachine(false),
      mDelayedScheduler(mTaskQueue, true /*aFuzzy*/),
      mAmpleAudioThreshold(detail::AMPLE_AUDIO_THRESHOLD),
      mVideoDecodeSuspended(false),
      mVideoDecodeSuspendTimer(mTaskQueue),
      mVideoDecodeMode(VideoDecodeMode::Normal),
      mIsMSE(aDecoder->IsMSE()),
      mShouldResistFingerprinting(aDecoder->ShouldResistFingerprinting()),
      mSeamlessLoopingAllowed(false),
      mTotalBufferingDuration(TimeDuration::Zero()),
      INIT_MIRROR(mStreamName, nsAutoString()),
      INIT_MIRROR(mSinkDevice, nullptr),
      INIT_MIRROR(mOutputCaptureState, MediaDecoder::OutputCaptureState::None),
      INIT_MIRROR(mOutputDummyTrack, nullptr),
      INIT_MIRROR(mOutputTracks, nsTArray<RefPtr<ProcessedMediaTrack>>()),
      INIT_MIRROR(mOutputPrincipal, PRINCIPAL_HANDLE_NONE),
      INIT_CANONICAL(mCanonicalOutputPrincipal, PRINCIPAL_HANDLE_NONE),
      mShuttingDown(false),
      mInitialized(false) {
  MOZ_COUNT_CTOR(MediaDecoderStateMachine);
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");

  InitVideoQueuePrefs();

  DDLINKCHILD("reader", aReader);
}

#undef INIT_WATCHABLE
#undef INIT_MIRROR
#undef INIT_CANONICAL

MediaDecoderStateMachine::~MediaDecoderStateMachine() {
  MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread.");
  MOZ_COUNT_DTOR(MediaDecoderStateMachine);
}

void MediaDecoderStateMachine::InitializationTask(MediaDecoder* aDecoder) {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::InitializationTask",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());

  MediaDecoderStateMachineBase::InitializationTask(aDecoder);

  // Initialize watchers.
  mWatchManager.Watch(mStreamName,
                      &MediaDecoderStateMachine::StreamNameChanged);
  mWatchManager.Watch(mOutputCaptureState,
                      &MediaDecoderStateMachine::UpdateOutputCaptured);
  mWatchManager.Watch(mOutputDummyTrack,
                      &MediaDecoderStateMachine::UpdateOutputCaptured);
  mWatchManager.Watch(mOutputTracks,
                      &MediaDecoderStateMachine::UpdateOutputCaptured);
  mWatchManager.Watch(mOutputPrincipal,
                      &MediaDecoderStateMachine::OutputPrincipalChanged);

  mMediaSink = CreateMediaSink();
  mInitialized = true;

  MOZ_ASSERT(!mStateObj);
  auto* s = new DecodeMetadataState(this);
  mStateObj.reset(s);
}

void MediaDecoderStateMachine::AudioAudibleChanged(bool aAudible) {
  mIsAudioDataAudible = aAudible;
}

MediaSink* MediaDecoderStateMachine::CreateAudioSink() {
  if (mOutputCaptureState != MediaDecoder::OutputCaptureState::None) {
    DecodedStream* stream = new DecodedStream(
        this,
        mOutputCaptureState == MediaDecoder::OutputCaptureState::Capture
            ? mOutputDummyTrack.Ref()
            : nullptr,
        mOutputTracks, mVolume, mPlaybackRate, mPreservesPitch, mAudioQueue,
        mVideoQueue, mSinkDevice.Ref());
    mAudibleListener.DisconnectIfExists();
    mAudibleListener = stream->AudibleEvent().Connect(
        OwnerThread(), this, &MediaDecoderStateMachine::AudioAudibleChanged);
    return stream;
  }

  auto audioSinkCreator = [s = RefPtr<MediaDecoderStateMachine>(this), this]() {
    MOZ_ASSERT(OnTaskQueue());
    UniquePtr<AudioSink> audioSink{new AudioSink(
        mTaskQueue, mAudioQueue, Info().mAudio, mShouldResistFingerprinting)};
    mAudibleListener.DisconnectIfExists();
    mAudibleListener = audioSink->AudibleEvent().Connect(
        mTaskQueue, this, &MediaDecoderStateMachine::AudioAudibleChanged);
    return audioSink;
  };
  return new AudioSinkWrapper(
      mTaskQueue, mAudioQueue, std::move(audioSinkCreator), mVolume,
      mPlaybackRate, mPreservesPitch, mSinkDevice.Ref());
}
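
// CreateMediaSink() composes the sinks used for playback: a VideoSink always
// sits on top, wrapping either the AudioSinkWrapper created above for normal
// playback or the DecodedStream used when output is being captured into
// MediaTracks.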

already_AddRefed<MediaSink> MediaDecoderStateMachine::CreateMediaSink() {
  MOZ_ASSERT(OnTaskQueue());
  RefPtr<MediaSink> audioSink = CreateAudioSink();
  RefPtr<MediaSink> mediaSink =
      new VideoSink(mTaskQueue, audioSink, mVideoQueue, mVideoFrameContainer,
                    *mFrameStats, sVideoQueueSendToCompositorSize);
  if (mSecondaryVideoContainer.Ref()) {
    mediaSink->SetSecondaryVideoContainer(mSecondaryVideoContainer.Ref());
  }
  return mediaSink.forget();
}

TimeUnit MediaDecoderStateMachine::GetDecodedAudioDuration() const {
  MOZ_ASSERT(OnTaskQueue());
  if (mMediaSink->IsStarted()) {
    return mMediaSink->UnplayedDuration(TrackInfo::kAudioTrack) +
           TimeUnit::FromMicroseconds(AudioQueue().Duration());
  }
  // MediaSink not started. All audio samples are in the queue.
  return TimeUnit::FromMicroseconds(AudioQueue().Duration());
}
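
// The "enough data" checks below scale with the playback rate: at 2x speed we
// consume decoded data twice as fast, so the ample-audio threshold and the
// ample-video frame count are multiplied by mPlaybackRate before being
// compared against what is currently queued.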

bool MediaDecoderStateMachine::HaveEnoughDecodedAudio() const {
  MOZ_ASSERT(OnTaskQueue());
  auto ampleAudio = mAmpleAudioThreshold.MultDouble(mPlaybackRate);
  return AudioQueue().GetSize() > 0 && GetDecodedAudioDuration() >= ampleAudio;
}

bool MediaDecoderStateMachine::HaveEnoughDecodedVideo() const {
  MOZ_ASSERT(OnTaskQueue());
  return static_cast<double>(VideoQueue().GetSize()) >=
             GetAmpleVideoFrames() * mPlaybackRate + 1 &&
         IsVideoDataEnoughComparedWithAudio();
}

bool MediaDecoderStateMachine::IsVideoDataEnoughComparedWithAudio() const {
  // HW decoding is usually fast enough and we don't need to worry about its
  // speed.
  // TODO : we can consider whether we need to enable this on other HW decoding
  // except VAAPI. When enabling VAAPI on Linux, ffmpeg is not able to store too
  // many frames because it has a limitation of amount of stored video frames.
  // See bug1716638 and 1718309.
  if (mReader->VideoIsHardwareAccelerated()) {
    return true;
  }
  // In extreme situations (e.g. 4k+ video without hardware acceleration), the
  // video decoding will be much slower than audio. So for 4K+ video, we want
  // to consider audio decoding speed as well in order to reduce frame drops.
  // This check tries to keep the decoded video buffered as much as audio.
  if (HasAudio() && Info().mVideo.mImage.width >= 3840 &&
      Info().mVideo.mImage.height >= 2160) {
    return VideoQueue().Duration() >= AudioQueue().Duration();
  }
  // For non-4k video, the video decoding is usually really fast, so we won't
  // need to consider audio decoding speed to store extra frames.
  return true;
}

void MediaDecoderStateMachine::PushAudio(AudioData* aSample) {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aSample);
  AudioQueue().Push(aSample);
  PROFILER_MARKER("MDSM::PushAudio", MEDIA_PLAYBACK, {}, MediaSampleMarker,
                  aSample->mTime.ToMicroseconds(),
                  aSample->GetEndTime().ToMicroseconds(),
                  AudioQueue().GetSize());
}

void MediaDecoderStateMachine::PushVideo(VideoData* aSample) {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aSample);
  aSample->mFrameID = ++mCurrentFrameID;
  VideoQueue().Push(aSample);
  PROFILER_MARKER("MDSM::PushVideo", MEDIA_PLAYBACK, {}, MediaSampleMarker,
                  aSample->mTime.ToMicroseconds(),
                  aSample->GetEndTime().ToMicroseconds(),
                  VideoQueue().GetSize());
}
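
// mPlaybackOffset below tracks the largest byte offset of any sample popped
// from the queues; it is the offset reported with the PlaybackStarted,
// PlaybackProgressed and PlaybackStopped events so the decoder can estimate
// how fast media data is being consumed.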

void MediaDecoderStateMachine::OnAudioPopped(const RefPtr<AudioData>& aSample) {
  MOZ_ASSERT(OnTaskQueue());
  mPlaybackOffset = std::max(mPlaybackOffset, aSample->mOffset);
}

void MediaDecoderStateMachine::OnVideoPopped(const RefPtr<VideoData>& aSample) {
  MOZ_ASSERT(OnTaskQueue());
  mPlaybackOffset = std::max(mPlaybackOffset, aSample->mOffset);
}

bool MediaDecoderStateMachine::IsAudioDecoding() {
  MOZ_ASSERT(OnTaskQueue());
  return HasAudio() && !AudioQueue().IsFinished();
}

bool MediaDecoderStateMachine::IsVideoDecoding() {
  MOZ_ASSERT(OnTaskQueue());
  return HasVideo() && !VideoQueue().IsFinished();
}

bool MediaDecoderStateMachine::IsPlaying() const {
  MOZ_ASSERT(OnTaskQueue());
  return mMediaSink->IsPlaying();
}

void MediaDecoderStateMachine::SetMediaNotSeekable() { mMediaSeekable = false; }

nsresult MediaDecoderStateMachine::Init(MediaDecoder* aDecoder) {
  MOZ_ASSERT(NS_IsMainThread());

  nsresult rv = MediaDecoderStateMachineBase::Init(aDecoder);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  aDecoder->CanonicalStreamName().ConnectMirror(&mStreamName);
  aDecoder->CanonicalSinkDevice().ConnectMirror(&mSinkDevice);
  aDecoder->CanonicalOutputCaptureState().ConnectMirror(&mOutputCaptureState);
  aDecoder->CanonicalOutputDummyTrack().ConnectMirror(&mOutputDummyTrack);
  aDecoder->CanonicalOutputTracks().ConnectMirror(&mOutputTracks);
  aDecoder->CanonicalOutputPrincipal().ConnectMirror(&mOutputPrincipal);

  mAudioQueueListener = AudioQueue().PopFrontEvent().Connect(
      mTaskQueue, this, &MediaDecoderStateMachine::OnAudioPopped);
  mVideoQueueListener = VideoQueue().PopFrontEvent().Connect(
      mTaskQueue, this, &MediaDecoderStateMachine::OnVideoPopped);
  mOnMediaNotSeekable = mReader->OnMediaNotSeekable().Connect(
      OwnerThread(), this, &MediaDecoderStateMachine::SetMediaNotSeekable);

  return NS_OK;
}

void MediaDecoderStateMachine::StopPlayback() {
  MOZ_ASSERT(OnTaskQueue());
  LOG("StopPlayback()");

  mOnPlaybackEvent.Notify(MediaPlaybackEvent{
      MediaPlaybackEvent::PlaybackStopped, mPlaybackOffset});
  mMediaSink->SetPlaying(false);
  MOZ_ASSERT(!IsPlaying());
}

void MediaDecoderStateMachine::MaybeStartPlayback() {
  MOZ_ASSERT(OnTaskQueue());
  // Should try to start playback only after decoding first frames.
  if (!mSentFirstFrameLoadedEvent) {
    LOG("MaybeStartPlayback: Not starting playback before loading first frame");
    return;
  }

  if (IsPlaying()) {
    // Logging this case is really spammy - don't do it.
    return;
  }

  if (mIsMediaSinkSuspended) {
    LOG("MaybeStartPlayback: Not starting playback when sink is suspended");
    return;
  }

  if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
    LOG("MaybeStartPlayback: Not starting playback [mPlayState=%d]",
        mPlayState.Ref());
    return;
  }

  LOG("MaybeStartPlayback() starting playback");
  StartMediaSink();

  if (!IsPlaying()) {
    mMediaSink->SetPlaying(true);
    MOZ_ASSERT(IsPlaying());
  }

  mOnPlaybackEvent.Notify(
      MediaPlaybackEvent{MediaPlaybackEvent::PlaybackStarted, mPlaybackOffset});
}

void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(
    const TimeUnit& aTime) {
  MOZ_ASSERT(OnTaskQueue());
  LOGV("UpdatePlaybackPositionInternal(%" PRId64 ")", aTime.ToMicroseconds());

  // Ensure the position has a precision that matches other TimeUnits such as
  // buffering ranges and duration.
  mCurrentPosition = aTime.ToBase(1000000);
  NS_ASSERTION(mCurrentPosition.Ref() >= TimeUnit::Zero(),
               "CurrentTime should be positive!");
  if (mDuration.Ref().ref() < mCurrentPosition.Ref()) {
    mDuration = Some(mCurrentPosition.Ref());
    DDLOG(DDLogCategory::Property, "duration_us",
          mDuration.Ref()->ToMicroseconds());
  }
}

void MediaDecoderStateMachine::UpdatePlaybackPosition(const TimeUnit& aTime) {
  MOZ_ASSERT(OnTaskQueue());
  UpdatePlaybackPositionInternal(aTime);

  bool fragmentEnded =
      mFragmentEndTime.IsValid() && GetMediaTime() >= mFragmentEndTime;
  mMetadataManager.DispatchMetadataIfNeeded(aTime);

  if (fragmentEnded) {
    StopPlayback();
  }
}

/* static */ const char* MediaDecoderStateMachine::ToStateStr(State aState) {
  switch (aState) {
    case DECODER_STATE_DECODING_METADATA:
      return "DECODING_METADATA";
    case DECODER_STATE_DORMANT:
      return "DORMANT";
    case DECODER_STATE_DECODING_FIRSTFRAME:
      return "DECODING_FIRSTFRAME";
    case DECODER_STATE_DECODING:
      return "DECODING";
    case DECODER_STATE_SEEKING_ACCURATE:
      return "SEEKING_ACCURATE";
    case DECODER_STATE_SEEKING_FROMDORMANT:
      return "SEEKING_FROMDORMANT";
    case DECODER_STATE_SEEKING_NEXTFRAMESEEKING:
      return "DECODER_STATE_SEEKING_NEXTFRAMESEEKING";
    case DECODER_STATE_SEEKING_VIDEOONLY:
      return "SEEKING_VIDEOONLY";
    case DECODER_STATE_BUFFERING:
      return "BUFFERING";
    case DECODER_STATE_COMPLETED:
      return "COMPLETED";
    case DECODER_STATE_SHUTDOWN:
      return "SHUTDOWN";
    case DECODER_STATE_LOOPING_DECODING:
      return "LOOPING_DECODING";
    default:
      MOZ_ASSERT_UNREACHABLE("Invalid state.");
      return "UNKNOWN";
  }
}

const char* MediaDecoderStateMachine::ToStateStr() {
  MOZ_ASSERT(OnTaskQueue());
  return ToStateStr(mStateObj->GetState());
}

void MediaDecoderStateMachine::VolumeChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::VolumeChanged",
                      MEDIA_PLAYBACK);
  PROFILER_MARKER_TEXT("MDSM::VolumeChanged", MEDIA_PLAYBACK, {},
                       nsPrintfCString("%f", mVolume.Ref()));
  MOZ_ASSERT(OnTaskQueue());
  mMediaSink->SetVolume(mVolume);
}

RefPtr<ShutdownPromise> MediaDecoderStateMachine::Shutdown() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::Shutdown", MEDIA_PLAYBACK);
  PROFILER_MARKER_UNTYPED("MDSM::Shutdown", MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  mShuttingDown = true;
  return mStateObj->HandleShutdown();
}

void MediaDecoderStateMachine::PlayStateChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::PlayStateChanged",
                      MEDIA_PLAYBACK);
  PROFILER_MARKER_TEXT(
      "MDSM::PlayStateChanged", MEDIA_PLAYBACK, {},
      nsPrintfCString("%s", MediaDecoder::EnumValueToString(mPlayState.Ref())));
  MOZ_ASSERT(OnTaskQueue());

  if (mPlayState != MediaDecoder::PLAY_STATE_PLAYING) {
    CancelSuspendTimer();
  } else if (mMinimizePreroll) {
    // Once we start playing, we don't want to minimize our prerolling, as we
    // assume the user is likely to want to keep playing in future. This needs
    // to happen before we invoke StartDecoding().
    mMinimizePreroll = false;
  }

  mStateObj->HandlePlayStateChanged(mPlayState);
}
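
// SetVideoDecodeMode() is the main-thread entry point for switching between
// normal and suspended video decoding; it simply dispatches to the task
// queue, where SetVideoDecodeModeInternal() performs the actual mode change.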

void MediaDecoderStateMachine::SetVideoDecodeMode(VideoDecodeMode aMode) {
  MOZ_ASSERT(NS_IsMainThread());
  nsCOMPtr<nsIRunnable> r = NewRunnableMethod<VideoDecodeMode>(
      "MediaDecoderStateMachine::SetVideoDecodeModeInternal", this,
      &MediaDecoderStateMachine::SetVideoDecodeModeInternal, aMode);
  OwnerThread()->DispatchStateChange(r.forget());
}

void MediaDecoderStateMachine::SetVideoDecodeModeInternal(
    VideoDecodeMode aMode) {
  MOZ_ASSERT(OnTaskQueue());

  LOG("SetVideoDecodeModeInternal(), VideoDecodeMode=(%s->%s), "
      "mVideoDecodeSuspended=%c",
      mVideoDecodeMode == VideoDecodeMode::Normal ? "Normal" : "Suspend",
      aMode == VideoDecodeMode::Normal ? "Normal" : "Suspend",
      mVideoDecodeSuspended ? 'T' : 'F');

  // Should not suspend decoding if we don't turn on the pref.
  if (!StaticPrefs::media_suspend_background_video_enabled() &&
      aMode == VideoDecodeMode::Suspend) {
    LOG("SetVideoDecodeModeInternal(), early return because preference off and "
        "set to Suspend");
    return;
  }

  if (aMode == mVideoDecodeMode) {
    LOG("SetVideoDecodeModeInternal(), early return because the mode does not "
        "change");
    return;
  }

  // Set new video decode mode.
  mVideoDecodeMode = aMode;

  // Start timer to trigger suspended video decoding.
  if (mVideoDecodeMode == VideoDecodeMode::Suspend) {
    TimeStamp target = TimeStamp::Now() + SuspendBackgroundVideoDelay();

    RefPtr<MediaDecoderStateMachine> self = this;
    mVideoDecodeSuspendTimer.Ensure(
        target, [=]() { self->OnSuspendTimerResolved(); },
        []() { MOZ_DIAGNOSTIC_CRASH("SetVideoDecodeModeInternal reject"); });
    mOnPlaybackEvent.Notify(MediaPlaybackEvent::StartVideoSuspendTimer);
    return;
  }

  // Resuming from suspended decoding.

  // If a suspend timer exists, destroy it.
  CancelSuspendTimer();

  if (mVideoDecodeSuspended) {
    auto target = mMediaSink->IsStarted() ? GetClock() : GetMediaTime();
    AdjustByLooping(target);
    mStateObj->HandleResumeVideoDecoding(target + detail::RESUME_VIDEO_PREMIUM);
  }
}

void MediaDecoderStateMachine::BufferedRangeUpdated() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::BufferedRangeUpdated",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());

  // While playing an unseekable stream of unknown duration, mDuration
  // is updated as we play. But if data is being downloaded
  // faster than played, mDuration won't reflect the end of playable data
  // since we haven't played the frame at the end of buffered data. So update
  // mDuration here as new data is downloaded to prevent such a lag.
  if (mBuffered.Ref().IsInvalid()) {
    return;
  }

  bool exists;
  media::TimeUnit end{mBuffered.Ref().GetEnd(&exists)};
  if (!exists) {
    return;
  }

  // Use estimated duration from buffer ranges when mDuration is unknown or
  // the estimated duration is larger.
  if ((mDuration.Ref().isNothing() || mDuration.Ref()->IsInfinite() ||
       end > mDuration.Ref().ref()) &&
      end.IsPositiveOrZero()) {
    nsPrintfCString msg{
        "duration:%" PRId64 "->%" PRId64,
        mDuration.Ref().isNothing() ? 0 : mDuration.Ref()->ToMicroseconds(),
        end.ToMicroseconds()};
    PROFILER_MARKER_TEXT("MDSM::BufferedRangeUpdated", MEDIA_PLAYBACK, {}, msg);
    LOG("%s", msg.get());
    mDuration = Some(end);
    DDLOG(DDLogCategory::Property, "duration_us",
          mDuration.Ref()->ToMicroseconds());
  }
}

RefPtr<MediaDecoder::SeekPromise> MediaDecoderStateMachine::Seek(
    const SeekTarget& aTarget) {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::Seek", MEDIA_PLAYBACK);
  PROFILER_MARKER_UNTYPED("MDSM::Seek", MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());

  // We need to be able to seek in some way.
  if (!mMediaSeekable && !mMediaSeekableOnlyInBufferedRanges) {
    LOGW("Seek() should not be called on a non-seekable media");
    return MediaDecoder::SeekPromise::CreateAndReject(/* aRejectValue = */ true,
                                                      __func__);
  }

  if (aTarget.IsNextFrame() && !HasVideo()) {
    LOGW("Ignore a NextFrameSeekTask on a media file without video track.");
    return MediaDecoder::SeekPromise::CreateAndReject(/* aRejectValue = */ true,
                                                      __func__);
  }

  MOZ_ASSERT(mDuration.Ref().isSome(), "We should have got duration already");

  return mStateObj->HandleSeek(aTarget);
}

void MediaDecoderStateMachine::StopMediaSink() {
  MOZ_ASSERT(OnTaskQueue());
  if (mMediaSink->IsStarted()) {
    LOG("Stop MediaSink");
    mMediaSink->Stop();
    mMediaSinkAudioEndedPromise.DisconnectIfExists();
    mMediaSinkVideoEndedPromise.DisconnectIfExists();
  }
}

void MediaDecoderStateMachine::RequestAudioData() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RequestAudioData",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(IsAudioDecoding());
  MOZ_ASSERT(!IsRequestingAudioData());
  MOZ_ASSERT(!IsWaitingAudioData());
  LOGV("Queueing audio task - queued=%zu, decoder-queued=%zu",
       AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames());

  PerformanceRecorder<PlaybackStage> perfRecorder(MediaStage::RequestData);
  RefPtr<MediaDecoderStateMachine> self = this;
  mReader->RequestAudioData()
      ->Then(
          OwnerThread(), __func__,
          [this, self, perfRecorder(std::move(perfRecorder))](
              const RefPtr<AudioData>& aAudio) mutable {
            perfRecorder.Record();
            AUTO_PROFILER_LABEL(
                "MediaDecoderStateMachine::RequestAudioData:Resolved",
                MEDIA_PLAYBACK);
            mAudioDataRequest.Complete();
            // audio->GetEndTime() is not always mono-increasing in chained
            // ogg streams.
            mDecodedAudioEndTime =
                std::max(aAudio->GetEndTime(), mDecodedAudioEndTime);
            LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]",
                 aAudio->mTime.ToMicroseconds(),
                 aAudio->GetEndTime().ToMicroseconds());
            mStateObj->HandleAudioDecoded(aAudio);
          },
          [this, self](const MediaResult& aError) {
            AUTO_PROFILER_LABEL(
                "MediaDecoderStateMachine::RequestAudioData:Rejected",
                MEDIA_PLAYBACK);
            LOGV("OnAudioNotDecoded ErrorName=%s Message=%s",
                 aError.ErrorName().get(), aError.Message().get());
            mAudioDataRequest.Complete();
            switch (aError.Code()) {
              case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
                mStateObj->HandleWaitingForAudio();
                break;
              case NS_ERROR_DOM_MEDIA_CANCELED:
                mStateObj->HandleAudioCanceled();
                break;
              case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
                mStateObj->HandleEndOfAudio();
                break;
              default:
                DecodeError(aError);
            }
          })
      ->Track(mAudioDataRequest);
}
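
// RequestVideoData() mirrors RequestAudioData() above, with one extra knob:
// when mBypassingSkipToNextKeyFrameCheck is set we pass a zero current time
// and disable the next-keyframe request, so the reader keeps decoding every
// frame instead of skipping ahead to a keyframe.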

void MediaDecoderStateMachine::RequestVideoData(
    const media::TimeUnit& aCurrentTime, bool aRequestNextKeyFrame) {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RequestVideoData",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(IsVideoDecoding());
  MOZ_ASSERT(!IsRequestingVideoData());
  MOZ_ASSERT(!IsWaitingVideoData());
  LOGV(
      "Queueing video task - queued=%zu, decoder-queued=%zu"
      ", stime=%" PRId64 ", by-pass-skip=%d",
      VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(),
      aCurrentTime.ToMicroseconds(), mBypassingSkipToNextKeyFrameCheck);

  PerformanceRecorder<PlaybackStage> perfRecorder(MediaStage::RequestData,
                                                  Info().mVideo.mImage.height);
  RefPtr<MediaDecoderStateMachine> self = this;
  mReader
      ->RequestVideoData(
          mBypassingSkipToNextKeyFrameCheck ? media::TimeUnit() : aCurrentTime,
          mBypassingSkipToNextKeyFrameCheck ? false : aRequestNextKeyFrame)
      ->Then(
          OwnerThread(), __func__,
          [this, self, perfRecorder(std::move(perfRecorder))](
              const RefPtr<VideoData>& aVideo) mutable {
            perfRecorder.Record();
            AUTO_PROFILER_LABEL(
                "MediaDecoderStateMachine::RequestVideoData:Resolved",
                MEDIA_PLAYBACK);
            mVideoDataRequest.Complete();
            // Handle abnormal or negative timestamps.
            mDecodedVideoEndTime =
                std::max(mDecodedVideoEndTime, aVideo->GetEndTime());
            LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]",
                 aVideo->mTime.ToMicroseconds(),
                 aVideo->GetEndTime().ToMicroseconds());
            mStateObj->HandleVideoDecoded(aVideo);
          },
          [this, self](const MediaResult& aError) {
            AUTO_PROFILER_LABEL(
                "MediaDecoderStateMachine::RequestVideoData:Rejected",
                MEDIA_PLAYBACK);
            LOGV("OnVideoNotDecoded ErrorName=%s Message=%s",
                 aError.ErrorName().get(), aError.Message().get());
            mVideoDataRequest.Complete();
            switch (aError.Code()) {
              case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
                mStateObj->HandleWaitingForVideo();
                break;
              case NS_ERROR_DOM_MEDIA_CANCELED:
                mStateObj->HandleVideoCanceled();
                break;
              case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
                mStateObj->HandleEndOfVideo();
                break;
              default:
                DecodeError(aError);
            }
          })
      ->Track(mVideoDataRequest);
}

void MediaDecoderStateMachine::WaitForData(MediaData::Type aType) {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
             aType == MediaData::Type::VIDEO_DATA);
  LOG("%s: %s", __func__, MediaData::EnumValueToString(aType));
  RefPtr<MediaDecoderStateMachine> self = this;
  if (aType == MediaData::Type::AUDIO_DATA) {
    mReader->WaitForData(MediaData::Type::AUDIO_DATA)
        ->Then(
            OwnerThread(), __func__,
            [self](MediaData::Type aType) {
              AUTO_PROFILER_LABEL(
                  "MediaDecoderStateMachine::WaitForData:AudioResolved",
                  MEDIA_PLAYBACK);
              self->mAudioWaitRequest.Complete();
              MOZ_ASSERT(aType == MediaData::Type::AUDIO_DATA);
              self->mStateObj->HandleAudioWaited(aType);
            },
            [self](const WaitForDataRejectValue& aRejection) {
              AUTO_PROFILER_LABEL(
                  "MediaDecoderStateMachine::WaitForData:AudioRejected",
                  MEDIA_PLAYBACK);
              self->mAudioWaitRequest.Complete();
              self->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
            })
        ->Track(mAudioWaitRequest);
  } else {
    mReader->WaitForData(MediaData::Type::VIDEO_DATA)
        ->Then(
            OwnerThread(), __func__,
            [self, this](MediaData::Type aType) {
              AUTO_PROFILER_LABEL(
                  "MediaDecoderStateMachine::WaitForData:VideoResolved",
                  MEDIA_PLAYBACK);
              self->mVideoWaitRequest.Complete();
              MOZ_ASSERT(aType == MediaData::Type::VIDEO_DATA);
              LOG("WaitForData::VideoResolved");
              self->mStateObj->HandleVideoWaited(aType);
            },
            [self, this](const WaitForDataRejectValue& aRejection) {
              AUTO_PROFILER_LABEL(
                  "MediaDecoderStateMachine::WaitForData:VideoRejected",
                  MEDIA_PLAYBACK);
              self->mVideoWaitRequest.Complete();
              LOG("WaitForData::VideoRejected");
              self->DecodeError(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA);
            })
        ->Track(mVideoWaitRequest);
  }
}

nsresult MediaDecoderStateMachine::StartMediaSink() {
  MOZ_ASSERT(OnTaskQueue());

  if (mMediaSink->IsStarted()) {
    return NS_OK;
  }

  mAudioCompleted = false;
  const auto startTime = GetMediaTime();
  LOG("StartMediaSink, mediaTime=%" PRId64, startTime.ToMicroseconds());
  nsresult rv = mMediaSink->Start(startTime, Info());
  StreamNameChanged();

  auto videoPromise = mMediaSink->OnEnded(TrackInfo::kVideoTrack);
  auto audioPromise = mMediaSink->OnEnded(TrackInfo::kAudioTrack);

  if (audioPromise) {
    audioPromise
        ->Then(OwnerThread(), __func__, this,
               &MediaDecoderStateMachine::OnMediaSinkAudioComplete,
               &MediaDecoderStateMachine::OnMediaSinkAudioError)
        ->Track(mMediaSinkAudioEndedPromise);
  }
  if (videoPromise) {
    videoPromise
        ->Then(OwnerThread(), __func__, this,
               &MediaDecoderStateMachine::OnMediaSinkVideoComplete,
               &MediaDecoderStateMachine::OnMediaSinkVideoError)
        ->Track(mMediaSinkVideoEndedPromise);
  }
  // Remember the initial offset when playback starts. This will be used
  // to calculate the rate at which bytes are consumed as playback moves on.
  RefPtr<MediaData> sample = mAudioQueue.PeekFront();
  mPlaybackOffset = sample ? sample->mOffset : 0;
  sample = mVideoQueue.PeekFront();
  if (sample && sample->mOffset > mPlaybackOffset) {
    mPlaybackOffset = sample->mOffset;
  }
  return rv;
}
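
// The low-data checks below are the counterpart of the "ample" checks above:
// they detect when the decoded queues (or the buffered byte ranges) are about
// to run dry, again scaled by the playback rate so faster playback keeps a
// proportionally larger safety margin.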

bool MediaDecoderStateMachine::HasLowDecodedAudio() {
  MOZ_ASSERT(OnTaskQueue());
  return IsAudioDecoding() &&
         GetDecodedAudioDuration() <
             EXHAUSTED_DATA_MARGIN.MultDouble(mPlaybackRate);
}

bool MediaDecoderStateMachine::HasLowDecodedVideo() {
  MOZ_ASSERT(OnTaskQueue());
  return IsVideoDecoding() &&
         VideoQueue().GetSize() <
             static_cast<size_t>(floorl(LOW_VIDEO_FRAMES * mPlaybackRate));
}

bool MediaDecoderStateMachine::HasLowDecodedData() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(mReader->UseBufferingHeuristics());
  return HasLowDecodedAudio() || HasLowDecodedVideo();
}

bool MediaDecoderStateMachine::OutOfDecodedAudio() {
  MOZ_ASSERT(OnTaskQueue());
  return IsAudioDecoding() && !AudioQueue().IsFinished() &&
         AudioQueue().GetSize() == 0 &&
         !mMediaSink->HasUnplayedFrames(TrackInfo::kAudioTrack);
}

bool MediaDecoderStateMachine::HasLowBufferedData() {
  MOZ_ASSERT(OnTaskQueue());
  return HasLowBufferedData(detail::LOW_BUFFER_THRESHOLD);
}

bool MediaDecoderStateMachine::HasLowBufferedData(const TimeUnit& aThreshold) {
  MOZ_ASSERT(OnTaskQueue());

  // If we don't have a duration, mBuffered is probably not going to have
  // a useful buffered range. Return false here so that we don't get stuck in
  // buffering mode for live streams.
  if (Duration().IsInfinite()) {
    return false;
  }

  if (mBuffered.Ref().IsInvalid()) {
    return false;
  }

  // We are never low in decoded data when we don't have audio/video or have
  // decoded all audio/video samples.
  TimeUnit endOfDecodedVideo = (HasVideo() && !VideoQueue().IsFinished())
                                   ? mDecodedVideoEndTime
                                   : TimeUnit::FromNegativeInfinity();
  TimeUnit endOfDecodedAudio = (HasAudio() && !AudioQueue().IsFinished())
                                   ? mDecodedAudioEndTime
                                   : TimeUnit::FromNegativeInfinity();

  auto endOfDecodedData = std::max(endOfDecodedVideo, endOfDecodedAudio);
  if (Duration() < endOfDecodedData) {
    // Our duration is not up to date. No point buffering.
    return false;
  }

  if (endOfDecodedData.IsInfinite()) {
    // Have decoded all samples. No point buffering.
    return false;
  }

  auto start = endOfDecodedData;
  auto end = std::min(GetMediaTime() + aThreshold, Duration());
  if (start >= end) {
    // The duration of decoded samples is greater than our threshold.
    return false;
  }
  media::TimeInterval interval(start, end);
  return !mBuffered.Ref().Contains(interval);
}

void MediaDecoderStateMachine::EnqueueFirstFrameLoadedEvent() {
  MOZ_ASSERT(OnTaskQueue());
  // Track the value of mSentFirstFrameLoadedEvent from before updating it.
  bool firstFrameBeenLoaded = mSentFirstFrameLoadedEvent;
  mSentFirstFrameLoadedEvent = true;
  MediaDecoderEventVisibility visibility =
      firstFrameBeenLoaded ? MediaDecoderEventVisibility::Suppressed
                           : MediaDecoderEventVisibility::Observable;
  mFirstFrameLoadedEvent.Notify(UniquePtr<MediaInfo>(new MediaInfo(Info())),
                                visibility);
}

void MediaDecoderStateMachine::FinishDecodeFirstFrame() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(!mSentFirstFrameLoadedEvent);
  LOG("FinishDecodeFirstFrame");

  mMediaSink->Redraw(Info().mVideo);

  LOG("Media duration %" PRId64 ", mediaSeekable=%d",
      Duration().ToMicroseconds(), mMediaSeekable);

  // Get potentially updated metadata.
  mReader->ReadUpdatedMetadata(mInfo.ptr());

  EnqueueFirstFrameLoadedEvent();
}

RefPtr<ShutdownPromise> MediaDecoderStateMachine::FinishShutdown() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::FinishShutdown",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  LOG("Shutting down state machine task queue");
  return OwnerThread()->BeginShutdown();
}

void MediaDecoderStateMachine::RunStateMachine() {
  MOZ_ASSERT(OnTaskQueue());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::RunStateMachine",
                      MEDIA_PLAYBACK);

  mDelayedScheduler.Reset();  // Must happen on state machine task queue.
  mDispatchedStateMachine = false;
  mStateObj->Step();
}

void MediaDecoderStateMachine::ResetDecode(const TrackSet& aTracks) {
  MOZ_ASSERT(OnTaskQueue());
  LOG("MediaDecoderStateMachine::Reset");

  // Assert that aTracks specifies to reset the video track because we
  // don't currently support resetting just the audio track.
  MOZ_ASSERT(aTracks.contains(TrackInfo::kVideoTrack));

  if (aTracks.contains(TrackInfo::kVideoTrack)) {
    mDecodedVideoEndTime = TimeUnit::Zero();
    mVideoCompleted = false;
    VideoQueue().Reset();
    mVideoDataRequest.DisconnectIfExists();
    mVideoWaitRequest.DisconnectIfExists();
  }

  if (aTracks.contains(TrackInfo::kAudioTrack)) {
    mDecodedAudioEndTime = TimeUnit::Zero();
    mAudioCompleted = false;
    AudioQueue().Reset();
    mAudioDataRequest.DisconnectIfExists();
    mAudioWaitRequest.DisconnectIfExists();
  }

  mReader->ResetDecode(aTracks);
}

media::TimeUnit MediaDecoderStateMachine::GetClock(
    TimeStamp* aTimeStamp) const {
  MOZ_ASSERT(OnTaskQueue());
  auto clockTime = mMediaSink->GetPosition(aTimeStamp);
  // This assertion sometimes fails on Windows, see bug 1765563.
#ifdef XP_WIN
  NS_ASSERTION(GetMediaTime() <= clockTime, "Clock should go forwards.");
#else
  MOZ_ASSERT(GetMediaTime() <= clockTime, "Clock should go forwards.");
#endif
  return clockTime;
}

void MediaDecoderStateMachine::UpdatePlaybackPositionPeriodically() {
  MOZ_ASSERT(OnTaskQueue());

  if (!IsPlaying()) {
    return;
  }

  // Cap the current time to the larger of the audio and video end time.
  // This ensures that if we're running off the system clock, we don't
  // advance the clock to after the media end time.
  if (VideoEndTime() > TimeUnit::Zero() || AudioEndTime() > TimeUnit::Zero()) {
    auto clockTime = GetClock();
    // Once looping has been turned on, the time is probably larger than the
    // duration of the media track, so the time over the end should be
    // corrected.
    AdjustByLooping(clockTime);
    bool loopback = clockTime < GetMediaTime() && mLooping;
    if (loopback && mBypassingSkipToNextKeyFrameCheck) {
      LOG("media has looped back, no longer bypassing skip-to-next-key-frame");
      mBypassingSkipToNextKeyFrameCheck = false;
    }

    // Skip frames up to the frame at the playback position, and figure out
    // the time remaining until it's time to display the next frame and drop
    // the current frame.
    NS_ASSERTION(clockTime >= TimeUnit::Zero(),
                 "Should have positive clock time.");

    // These will be non -1 if we've displayed a video frame, or played an
    // audio frame.
    auto maxEndTime = std::max(VideoEndTime(), AudioEndTime());
    auto t = std::min(clockTime, maxEndTime);
    // FIXME: Bug 1091422 - chained ogg files hit this assertion.
    // MOZ_ASSERT(t >= GetMediaTime());
    if (loopback || t > GetMediaTime()) {
      UpdatePlaybackPosition(t);
    }
  }
  // Note we have to update the playback position before releasing the monitor.
  // Otherwise, MediaDecoder::AddOutputTrack could kick in when we are outside
  // the monitor and get a stale value from GetCurrentTimeUs() which hits the
  // assertion in GetClock().

  int64_t delay = std::max<int64_t>(
      1, static_cast<int64_t>(AUDIO_DURATION_USECS / mPlaybackRate));
  ScheduleStateMachineIn(TimeUnit::FromMicroseconds(delay));

  // Notify the listener as we progress in the playback offset. Note it would
  // be too intensive to send notifications for each popped audio/video sample.
  // It is good enough to send 'PlaybackProgressed' events every 40ms (defined
  // by AUDIO_DURATION_USECS), and we ensure 'PlaybackProgressed' events are
  // always sent after 'PlaybackStarted' and before 'PlaybackStopped'.
  mOnPlaybackEvent.Notify(MediaPlaybackEvent{
      MediaPlaybackEvent::PlaybackProgressed, mPlaybackOffset});
}

void MediaDecoderStateMachine::ScheduleStateMachine() {
  MOZ_ASSERT(OnTaskQueue());
  if (mDispatchedStateMachine) {
    return;
  }
  mDispatchedStateMachine = true;

  nsresult rv = OwnerThread()->Dispatch(
      NewRunnableMethod("MediaDecoderStateMachine::RunStateMachine", this,
                        &MediaDecoderStateMachine::RunStateMachine));
  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
  Unused << rv;
}

void MediaDecoderStateMachine::ScheduleStateMachineIn(const TimeUnit& aTime) {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::ScheduleStateMachineIn",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());  // mDelayedScheduler.Ensure() may Disconnect()
                              // the promise, which must happen on the state
                              // machine task queue.
  MOZ_ASSERT(aTime > TimeUnit::Zero());
  if (mDispatchedStateMachine) {
    return;
  }

  TimeStamp target = TimeStamp::Now() + aTime.ToTimeDuration();

  // It is OK to capture 'this' without causing UAF because the callback
  // always happens before shutdown.
  RefPtr<MediaDecoderStateMachine> self = this;
  mDelayedScheduler.Ensure(
      target,
      [self]() {
        self->mDelayedScheduler.CompleteRequest();
        self->RunStateMachine();
      },
      []() { MOZ_DIAGNOSTIC_CRASH("ScheduleStateMachineIn reject"); });
}

bool MediaDecoderStateMachine::IsStateMachineScheduled() const {
  MOZ_ASSERT(OnTaskQueue());
  return mDispatchedStateMachine || mDelayedScheduler.IsScheduled();
}

void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate) {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(aPlaybackRate != 0, "Should be handled by MediaDecoder::Pause()");
  PROFILER_MARKER_TEXT("MDSM::SetPlaybackRate", MEDIA_PLAYBACK, {},
                       nsPrintfCString("PlaybackRate:%f", aPlaybackRate));
  mPlaybackRate = aPlaybackRate;
  mMediaSink->SetPlaybackRate(mPlaybackRate);

  // Schedule the next cycle to check if we can stop prerolling.
  ScheduleStateMachine();
}

void MediaDecoderStateMachine::PreservesPitchChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::PreservesPitchChanged",
                      MEDIA_PLAYBACK);
  PROFILER_MARKER_TEXT(
      "MDSM::PreservesPitchChanged", MEDIA_PLAYBACK, {},
      nsPrintfCString("PreservesPitch:%d", mPreservesPitch.Ref()));
  MOZ_ASSERT(OnTaskQueue());
  mMediaSink->SetPreservesPitch(mPreservesPitch);
}

void MediaDecoderStateMachine::LoopingChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::LoopingChanged",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  LOGV("LoopingChanged, looping=%d", mLooping.Ref());
  PROFILER_MARKER_TEXT("MDSM::LoopingChanged", MEDIA_PLAYBACK, {},
                       mLooping ? "true"_ns : "false"_ns);
  if (mSeamlessLoopingAllowed) {
    mStateObj->HandleLoopingChanged();
  }
}

void MediaDecoderStateMachine::StreamNameChanged() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::StreamNameChanged",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());

  mMediaSink->SetStreamName(mStreamName);
}

void MediaDecoderStateMachine::UpdateOutputCaptured() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::UpdateOutputCaptured",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT_IF(
      mOutputCaptureState == MediaDecoder::OutputCaptureState::Capture,
      mOutputDummyTrack.Ref());

  // Reset these flags so they are consistent with the status of the sink.
  // TODO: Move these flags into MediaSink to improve cohesion so we don't need
  // to reset these flags when switching MediaSinks.
  mAudioCompleted = false;
  mVideoCompleted = false;

  // Don't create a new media sink if we're still suspending the media sink.
  if (!mIsMediaSinkSuspended) {
    const bool wasPlaying = IsPlaying();
    // Stop and shut down the existing sink.
    StopMediaSink();
    mMediaSink->Shutdown();

    // Create a new sink according to whether output is captured.
    mMediaSink = CreateMediaSink();
    if (wasPlaying) {
      DebugOnly<nsresult> rv = StartMediaSink();
      MOZ_ASSERT(NS_SUCCEEDED(rv));
    }
  }

  // Don't buffer as much when audio is captured because we don't need to worry
  // about high latency audio devices.
  mAmpleAudioThreshold =
      mOutputCaptureState != MediaDecoder::OutputCaptureState::None
          ? detail::AMPLE_AUDIO_THRESHOLD / 2
          : detail::AMPLE_AUDIO_THRESHOLD;

  mStateObj->HandleAudioCaptured();
}

void MediaDecoderStateMachine::OutputPrincipalChanged() {
  MOZ_ASSERT(OnTaskQueue());
  mCanonicalOutputPrincipal = mOutputPrincipal;
}

RefPtr<GenericPromise> MediaDecoderStateMachine::InvokeSetSink(
    const RefPtr<AudioDeviceInfo>& aSink) {
  MOZ_ASSERT(NS_IsMainThread());

  return InvokeAsync(OwnerThread(), this, __func__,
                     &MediaDecoderStateMachine::SetSink, aSink);
}

RefPtr<GenericPromise> MediaDecoderStateMachine::SetSink(
    RefPtr<AudioDeviceInfo> aDevice) {
  MOZ_ASSERT(OnTaskQueue());
  if (mIsMediaSinkSuspended) {
    // Don't create a new media sink when suspended.
    return GenericPromise::CreateAndResolve(true, __func__);
  }

  return mMediaSink->SetAudioDevice(std::move(aDevice));
}

void MediaDecoderStateMachine::InvokeSuspendMediaSink() {
  MOZ_ASSERT(NS_IsMainThread());

  nsresult rv = OwnerThread()->Dispatch(
      NewRunnableMethod("MediaDecoderStateMachine::SuspendMediaSink", this,
                        &MediaDecoderStateMachine::SuspendMediaSink));
  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
  Unused << rv;
}

void MediaDecoderStateMachine::SuspendMediaSink() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::SuspendMediaSink",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  if (mIsMediaSinkSuspended) {
    return;
  }
  LOG("SuspendMediaSink");
  mIsMediaSinkSuspended = true;
  StopMediaSink();
  mMediaSink->Shutdown();
}

void MediaDecoderStateMachine::InvokeResumeMediaSink() {
  MOZ_ASSERT(NS_IsMainThread());

  nsresult rv = OwnerThread()->Dispatch(
      NewRunnableMethod("MediaDecoderStateMachine::ResumeMediaSink", this,
                        &MediaDecoderStateMachine::ResumeMediaSink));
  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
  Unused << rv;
}

void MediaDecoderStateMachine::ResumeMediaSink() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::ResumeMediaSink",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  if (!mIsMediaSinkSuspended) {
    return;
  }
  LOG("ResumeMediaSink");
  mIsMediaSinkSuspended = false;
  if (!mMediaSink->IsStarted()) {
    mMediaSink = CreateMediaSink();
    MaybeStartPlayback();
  }
}

void MediaDecoderStateMachine::UpdateSecondaryVideoContainer() {
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::UpdateSecondaryVideoContainer",
                      MEDIA_PLAYBACK);
  MOZ_ASSERT(OnTaskQueue());
  MOZ_DIAGNOSTIC_ASSERT(mMediaSink);
  mMediaSink->SetSecondaryVideoContainer(mSecondaryVideoContainer.Ref());
  mOnSecondaryVideoContainerInstalled.Notify(mSecondaryVideoContainer.Ref());
}

TimeUnit MediaDecoderStateMachine::AudioEndTime() const {
  MOZ_ASSERT(OnTaskQueue());
  if (mMediaSink->IsStarted()) {
    return mMediaSink->GetEndTime(TrackInfo::kAudioTrack);
  }
  return GetMediaTime();
}

TimeUnit MediaDecoderStateMachine::VideoEndTime() const {
  MOZ_ASSERT(OnTaskQueue());
  if (mMediaSink->IsStarted()) {
    return mMediaSink->GetEndTime(TrackInfo::kVideoTrack);
  }
  return GetMediaTime();
}

void MediaDecoderStateMachine::OnMediaSinkVideoComplete() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(HasVideo());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkVideoComplete",
                      MEDIA_PLAYBACK);
  LOG("[%s]", __func__);

  mMediaSinkVideoEndedPromise.Complete();
  mVideoCompleted = true;
  ScheduleStateMachine();
}

void MediaDecoderStateMachine::OnMediaSinkVideoError() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(HasVideo());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkVideoError",
                      MEDIA_PLAYBACK);
  LOGE("[%s]", __func__);

  mMediaSinkVideoEndedPromise.Complete();
  mVideoCompleted = true;
  if (HasAudio()) {
    return;
  }
  DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
}

void MediaDecoderStateMachine::OnMediaSinkAudioComplete() {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(HasAudio());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkAudioComplete",
                      MEDIA_PLAYBACK);
  LOG("[%s]", __func__);

  mMediaSinkAudioEndedPromise.Complete();
  mAudioCompleted = true;
  // To notify PlaybackEnded as soon as possible.
  ScheduleStateMachine();

  // Report OK to Decoder Doctor (to know if the issue may have been resolved).
  mOnDecoderDoctorEvent.Notify(
      DecoderDoctorEvent{DecoderDoctorEvent::eAudioSinkStartup, NS_OK});
}

void MediaDecoderStateMachine::OnMediaSinkAudioError(nsresult aResult) {
  MOZ_ASSERT(OnTaskQueue());
  MOZ_ASSERT(HasAudio());
  AUTO_PROFILER_LABEL("MediaDecoderStateMachine::OnMediaSinkAudioError",
                      MEDIA_PLAYBACK);
  LOGE("[%s]", __func__);

  mMediaSinkAudioEndedPromise.Complete();
  mAudioCompleted = true;

  // The result should never be NS_OK in this *error* handler. Report it to
  // Decoder Doctor.
  MOZ_ASSERT(NS_FAILED(aResult));
  mOnDecoderDoctorEvent.Notify(
      DecoderDoctorEvent{DecoderDoctorEvent::eAudioSinkStartup, aResult});

  // Make the best effort to continue playback when there is video.
  if (HasVideo()) {
    return;
  }

  // Otherwise notify the media decoder/element about this error, as it makes
  // no sense to play an audio-only file without sound output.
  DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__));
}
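
// GetAmpleVideoFrames() returns the target length of the decoded video queue:
// the hardware-accelerated or default queue size, clamped so it is never
// below MIN_VIDEO_QUEUE_SIZE.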

uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const {
  MOZ_ASSERT(OnTaskQueue());
  return mReader->VideoIsHardwareAccelerated()
             ? std::max<uint32_t>(sVideoQueueHWAccelSize, MIN_VIDEO_QUEUE_SIZE)
             : std::max<uint32_t>(sVideoQueueDefaultSize, MIN_VIDEO_QUEUE_SIZE);
}

void MediaDecoderStateMachine::GetDebugInfo(
    dom::MediaDecoderStateMachineDebugInfo& aInfo) {
  MOZ_ASSERT(OnTaskQueue());
  aInfo.mDuration =
      mDuration.Ref() ? mDuration.Ref().ref().ToMicroseconds() : -1;
  aInfo.mMediaTime = GetMediaTime().ToMicroseconds();
  aInfo.mClock = mMediaSink->IsStarted() ? GetClock().ToMicroseconds() : -1;
  aInfo.mPlayState = int32_t(mPlayState.Ref());
  aInfo.mSentFirstFrameLoadedEvent = mSentFirstFrameLoadedEvent;
  aInfo.mIsPlaying = IsPlaying();
  CopyUTF8toUTF16(MakeStringSpan(AudioRequestStatus()),
                  aInfo.mAudioRequestStatus);
  CopyUTF8toUTF16(MakeStringSpan(VideoRequestStatus()),
                  aInfo.mVideoRequestStatus);
  aInfo.mDecodedAudioEndTime = mDecodedAudioEndTime.ToMicroseconds();
  aInfo.mDecodedVideoEndTime = mDecodedVideoEndTime.ToMicroseconds();
  aInfo.mAudioCompleted = mAudioCompleted;
  aInfo.mVideoCompleted = mVideoCompleted;
  mStateObj->GetDebugInfo(aInfo.mStateObj);
  mMediaSink->GetDebugInfo(aInfo.mMediaSink);
  aInfo.mTotalBufferingTimeMs = mTotalBufferingDuration.ToMilliseconds();
}

RefPtr<GenericPromise> MediaDecoderStateMachine::RequestDebugInfo(
    dom::MediaDecoderStateMachineDebugInfo& aInfo) {
  if (mShuttingDown) {
    return GenericPromise::CreateAndReject(NS_ERROR_FAILURE, __func__);
  }

  RefPtr<GenericPromise::Private> p = new GenericPromise::Private(__func__);
  RefPtr<MediaDecoderStateMachine> self = this;
  nsresult rv = OwnerThread()->Dispatch(
      NS_NewRunnableFunction("MediaDecoderStateMachine::RequestDebugInfo",
                             [self, p, &aInfo]() {
                               self->GetDebugInfo(aInfo);
                               p->Resolve(true, __func__);
                             }),
      AbstractThread::TailDispatch);
  MOZ_ASSERT(NS_SUCCEEDED(rv));
  Unused << rv;
  return p;
}

class VideoQueueMemoryFunctor : public nsDequeFunctor<VideoData> {
 public:
  VideoQueueMemoryFunctor() : mSize(0) {}

  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);

  virtual void operator()(VideoData* aObject) override {
    mSize += aObject->SizeOfIncludingThis(MallocSizeOf);
  }

  size_t mSize;
};

class AudioQueueMemoryFunctor : public nsDequeFunctor<AudioData> {
 public:
  AudioQueueMemoryFunctor() : mSize(0) {}

  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf);

  virtual void operator()(AudioData* aObject) override {
    mSize += aObject->SizeOfIncludingThis(MallocSizeOf);
  }

  size_t mSize;
};

size_t MediaDecoderStateMachine::SizeOfVideoQueue() const {
  VideoQueueMemoryFunctor functor;
  mVideoQueue.LockedForEach(functor);
  return functor.mSize;
}

size_t MediaDecoderStateMachine::SizeOfAudioQueue() const {
  AudioQueueMemoryFunctor functor;
  mAudioQueue.LockedForEach(functor);
  return functor.mSize;
}

const char* MediaDecoderStateMachine::AudioRequestStatus() const {
  MOZ_ASSERT(OnTaskQueue());
  if (IsRequestingAudioData()) {
    MOZ_DIAGNOSTIC_ASSERT(!IsWaitingAudioData());
    return "pending";
  }
  if (IsWaitingAudioData()) {
    return "waiting";
  }
  return "idle";
}

const char* MediaDecoderStateMachine::VideoRequestStatus() const {
  MOZ_ASSERT(OnTaskQueue());
  if (IsRequestingVideoData()) {
    MOZ_DIAGNOSTIC_ASSERT(!IsWaitingVideoData());
    return "pending";
  }
  if (IsWaitingVideoData()) {
    return "waiting";
  }
  return "idle";
}

void MediaDecoderStateMachine::OnSuspendTimerResolved() {
  LOG("OnSuspendTimerResolved");
  mVideoDecodeSuspendTimer.CompleteRequest();
  mStateObj->HandleVideoSuspendTimeout();
}

void MediaDecoderStateMachine::CancelSuspendTimer() {
  LOG("CancelSuspendTimer: State: %s, Timer.IsScheduled: %c",
      ToStateStr(mStateObj->GetState()),
      mVideoDecodeSuspendTimer.IsScheduled() ? 'T' : 'F');
  MOZ_ASSERT(OnTaskQueue());
  if (mVideoDecodeSuspendTimer.IsScheduled()) {
    mOnPlaybackEvent.Notify(MediaPlaybackEvent::CancelVideoSuspendTimer);
  }
  mVideoDecodeSuspendTimer.Reset();
}

void MediaDecoderStateMachine::AdjustByLooping(media::TimeUnit& aTime) const {
  MOZ_ASSERT(OnTaskQueue());

  // No need to adjust time.
  if (mOriginalDecodedDuration == media::TimeUnit::Zero()) {
    return;
  }

  // There are situations where we need to perform subtraction instead of
  // modulo to accurately adjust the clock. When we are not in a state of
  // seamless looping, it is usually necessary to normalize the clock time
  // within the range of [0, duration]. However, if the current clock time is
  // greater than the duration (i.e., duration+1) and we are not looping, we
  // should not adjust it to 1 as we are not looping back to the starting
  // position. Instead, we should leave the clock time unchanged and trim it
  // later to match the maximum duration time.
  if (mStateObj->GetState() != DECODER_STATE_LOOPING_DECODING) {
    // Use the smaller offset rather than the larger one, as the larger offset
    // indicates the next round of looping. For example, if the duration is X
    // and the playback is currently in the third round of looping, both
    // queues will have an offset of 3X. However, if the audio decoding is
    // faster and the fourth round of data has already been added to the audio
    // queue, the audio offset will become 4X. Since playback is still in the
    // third round, we should use the smaller offset of 3X to adjust the time.
    TimeUnit offset = TimeUnit::FromInfinity();
    if (HasAudio()) {
      offset = std::min(AudioQueue().GetOffset(), offset);
    }
    if (HasVideo()) {
      offset = std::min(VideoQueue().GetOffset(), offset);
    }
    if (aTime > offset) {
      aTime -= offset;
    }
    return;
  }

  // When seamless looping has happened at least once, it doesn't matter
  // whether we're still looping; always normalize with modulo.
  aTime = aTime % mOriginalDecodedDuration;
}

bool MediaDecoderStateMachine::IsInSeamlessLooping() const {
  return mLooping && mSeamlessLoopingAllowed;
}

bool MediaDecoderStateMachine::HasLastDecodedData(MediaData::Type aType) {
  MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                        aType == MediaData::Type::VIDEO_DATA);
  if (aType == MediaData::Type::AUDIO_DATA) {
    return mDecodedAudioEndTime != TimeUnit::Zero();
  }
  return mDecodedVideoEndTime != TimeUnit::Zero();
}

bool MediaDecoderStateMachine::IsCDMProxySupported(CDMProxy* aProxy) {
#ifdef MOZ_WMF_CDM
  // This proxy only works with the external state machine.
  return !aProxy->AsWMFCDMProxy();
#else
  return true;
#endif
}

RefPtr<SetCDMPromise> MediaDecoderStateMachine::SetCDMProxy(CDMProxy* aProxy) {
  // Playback hasn't started yet.
  if (!mInitialized) {
    mReader->SetEncryptedCustomIdent();
  }
  return MediaDecoderStateMachineBase::SetCDMProxy(aProxy);
}

}  // namespace mozilla

// avoid redefined macro in unified build
#undef NS_DispatchToMainThread