Quellcodebibliothek Statistik Leitseite products/sources/formale Sprachen/C++/Firefox/dom/media/   (Browser von der Mozilla Stiftung Version 136.0.1©)  Datei vom 10.2.2025 mit Größe 172 kB

Quelle  MediaDecoderStateMachine.cpp   Sprache: C++

 
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */


#include "MediaDecoderStateMachine.h"

#include <algorithm>
#include <stdint.h>
#include <utility>

#include "AudioSegment.h"
#include "DOMMediaStream.h"
#include "ImageContainer.h"
#include "MediaDecoder.h"
#include "MediaShutdownManager.h"
#include "MediaTimer.h"
#include "MediaTrackGraph.h"
#include "PerformanceRecorder.h"
#include "ReaderProxy.h"
#include "TimeUnits.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
#include "mediasink/AudioSink.h"
#include "mediasink/AudioSinkWrapper.h"
#include "mediasink/DecodedStream.h"
#include "mediasink/VideoSink.h"
#include "mozilla/Logging.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/NotNull.h"
#include "mozilla/Preferences.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/ProfilerMarkerTypes.h"
#include "mozilla/ProfilerMarkers.h"
#include "mozilla/SharedThreadPool.h"
#include "mozilla/Sprintf.h"
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/Telemetry.h"
#include "nsIMemoryReporter.h"
#include "nsPrintfCString.h"
#include "nsTArray.h"

namespace mozilla {

using namespace mozilla::media;

// Poison NS_DispatchToMainThread inside this file: expanding it produces the
// undefined identifier below, so any use fails to compile. MDSM code must
// dispatch via AbstractThread instead.
#define NS_DispatchToMainThread(...) \
  CompileError_UseAbstractThreadDispatchInstead

// avoid redefined macro in unified build
#undef FMT
#undef LOG
#undef LOGV
#undef LOGW
#undef LOGE
#undef SFMT
#undef SLOG
#undef SLOGW
#undef SLOGE

// FMT/LOG/LOGV/LOGW/LOGE prepend the decoder's identity (mDecoderID) to every
// message so interleaved logs from multiple decoders can be told apart.
#define FMT(x, ...) "Decoder=%p " x, mDecoderID, ##__VA_ARGS__
#define LOG(x, ...)                                                         \
  DDMOZ_LOG(gMediaDecoderLog, LogLevel::Debug, "Decoder=%p " x, mDecoderID, \
            ##__VA_ARGS__)
#define LOGV(x, ...)                                                          \
  DDMOZ_LOG(gMediaDecoderLog, LogLevel::Verbose, "Decoder=%p " x, mDecoderID, \
            ##__VA_ARGS__)
#define LOGW(x, ...) NS_WARNING(nsPrintfCString(FMT(x, ##__VA_ARGS__)).get())
#define LOGE(x, ...)                                                   \
  NS_DebugBreak(NS_DEBUG_WARNING,                                      \
                nsPrintfCString(FMT(x, ##__VA_ARGS__)).get(), nullptr, \
                __FILE__, __LINE__)

// Used by StateObject and its sub-classes. These variants additionally log
// the current state name; they access |mMaster|, so they only work inside
// StateObject (and subclass) member functions.
#define SFMT(x, ...)                                                     \
  "Decoder=%p state=%s " x, mMaster->mDecoderID, ToStateStr(GetState()), \
      ##__VA_ARGS__
#define SLOG(x, ...)                                                     \
  DDMOZ_LOGEX(mMaster, gMediaDecoderLog, LogLevel::Debug, "state=%s " x, \
              ToStateStr(GetState()), ##__VA_ARGS__)
#define SLOGW(x, ...) NS_WARNING(nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get())
#define SLOGE(x, ...)                                                   \
  NS_DebugBreak(NS_DEBUG_WARNING,                                       \
                nsPrintfCString(SFMT(x, ##__VA_ARGS__)).get(), nullptr, \
                __FILE__, __LINE__)

// Certain constants get stored as member variables and then adjusted by various
// scale factors on a per-decoder basis. We want to make sure to avoid using
// these constants directly, so we put them in a namespace.
namespace detail {

// Resume a suspended video decoder to the current playback position plus this
// time premium for compensating the seeking delay.
static constexpr auto RESUME_VIDEO_PREMIUM = TimeUnit::FromMicroseconds(125000);

// "Ample" amount of decoded audio, in microseconds. Kept as a plain integer
// so the static_assert below can compare it against
// LOW_BUFFER_THRESHOLD_USECS.
static const int64_t AMPLE_AUDIO_USECS = 2000000;

// If more than this much decoded audio is queued, we'll hold off
// decoding more audio.
static constexpr auto AMPLE_AUDIO_THRESHOLD =
    TimeUnit::FromMicroseconds(AMPLE_AUDIO_USECS);

}  // namespace detail

// If we have fewer than LOW_VIDEO_FRAMES decoded frames, and
// we're not "prerolling video", we'll skip the video up to the next keyframe
// which is at or after the current playback position.
static const uint32_t LOW_VIDEO_FRAMES = 2;

// Arbitrary "frame duration" when playing only audio.
static const uint32_t AUDIO_DURATION_USECS = 40000;

namespace detail {

// If we have less than this much buffered data available, we'll consider
// ourselves to be running low on buffered data. We determine how much
// buffered data we have remaining using the reader's GetBuffered()
// implementation.
static const int64_t LOW_BUFFER_THRESHOLD_USECS = 5000000;

static constexpr auto LOW_BUFFER_THRESHOLD =
    TimeUnit::FromMicroseconds(LOW_BUFFER_THRESHOLD_USECS);

// LOW_BUFFER_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS,
// otherwise the skip-to-keyframe logic can activate when we're running low on
// data.
static_assert(LOW_BUFFER_THRESHOLD_USECS > AMPLE_AUDIO_USECS,
              "LOW_BUFFER_THRESHOLD_USECS is too small");

}  // namespace detail

// Amount of excess data to add in to the "should we buffer" calculation.
static constexpr auto EXHAUSTED_DATA_MARGIN =
    TimeUnit::FromMicroseconds(100000);

// Bounds on the decoded-video queue. The hardware-accelerated queue size is
// larger on Apple media frameworks (MOZ_APPLEMEDIA) than elsewhere.
static const uint32_t MIN_VIDEO_QUEUE_SIZE = 3;
static const uint32_t MAX_VIDEO_QUEUE_SIZE = 10;
#ifdef MOZ_APPLEMEDIA
static const uint32_t HW_VIDEO_QUEUE_SIZE = 10;
#else
static const uint32_t HW_VIDEO_QUEUE_SIZE = 3;
#endif
static const uint32_t VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE = 9999;

// Runtime-overridable copies of the queue sizes above; populated from prefs
// once by InitVideoQueuePrefs().
static uint32_t sVideoQueueDefaultSize = MAX_VIDEO_QUEUE_SIZE;
static uint32_t sVideoQueueHWAccelSize = HW_VIDEO_QUEUE_SIZE;
static uint32_t sVideoQueueSendToCompositorSize =
    VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE;

static void InitVideoQueuePrefs() {
  MOZ_ASSERT(NS_IsMainThread());
  static bool sPrefInit = false;
  if (!sPrefInit) {
    sPrefInit = true;
    sVideoQueueDefaultSize = Preferences::GetUint(
        "media.video-queue.default-size", MAX_VIDEO_QUEUE_SIZE);
    sVideoQueueHWAccelSize = Preferences::GetUint(
        "media.video-queue.hw-accel-size", HW_VIDEO_QUEUE_SIZE);
    sVideoQueueSendToCompositorSize =
        Preferences::GetUint("media.video-queue.send-to-compositor-size",
                             VIDEO_QUEUE_SEND_TO_COMPOSITOR_SIZE);
  }
}

template <typename Type, typename Function>
static void DiscardFramesFromTail(MediaQueue<Type>& aQueue,
                                  const Function&& aTest) {
  while (aQueue.GetSize()) {
    if (aTest(aQueue.PeekBack()->mTime.ToMicroseconds())) {
      RefPtr<Type> releaseMe = aQueue.PopBack();
      continue;
    }
    break;
  }
}

// Delay, in milliseconds, that a tab needs to be in the background before
// video decoding is suspended.
static TimeDuration SuspendBackgroundVideoDelay() {
  return TimeDuration::FromMilliseconds(
      StaticPrefs::media_suspend_background_video_delay_ms());
}

/**
 * Base class for the MDSM's state objects. A concrete state overrides the
 * event handlers it can legitimately receive; the default handlers crash
 * deliberately (via Crash()) so that an event arriving in the wrong state
 * surfaces as a state-machine logic error rather than silent misbehavior.
 */
class MediaDecoderStateMachine::StateObject {
 public:
  virtual ~StateObject() = default;
  virtual void Exit() {}  // Exit action.
  virtual void Step() {}  // Perform a 'cycle' of this state object.
  virtual State GetState() const = 0;

  // Event handlers for various events.
  // Defaults crash: most events are only valid in particular states.
  virtual void HandleAudioCaptured() {}
  virtual void HandleAudioDecoded(AudioData* aAudio) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleVideoDecoded(VideoData* aVideo) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleAudioWaited(MediaData::Type aType) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleVideoWaited(MediaData::Type aType) {
    Crash("Unexpected event!", __func__);
  }
  virtual void HandleWaitingForAudio() { Crash("Unexpected event!", __func__); }
  virtual void HandleAudioCanceled() { Crash("Unexpected event!", __func__); }
  virtual void HandleEndOfAudio() { Crash("Unexpected event!", __func__); }
  virtual void HandleWaitingForVideo() { Crash("Unexpected event!", __func__); }
  virtual void HandleVideoCanceled() { Crash("Unexpected event!", __func__); }
  virtual void HandleEndOfVideo() { Crash("Unexpected event!", __func__); }

  virtual RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget);

  virtual RefPtr<ShutdownPromise> HandleShutdown();

  virtual void HandleVideoSuspendTimeout() = 0;

  virtual void HandleResumeVideoDecoding(const TimeUnit& aTarget);

  virtual void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) {}

  virtual void GetDebugInfo(
      dom::MediaDecoderStateMachineDecodingStateDebugInfo& aInfo) {}

  virtual void HandleLoopingChanged() {}

 private:
  // Declared but never defined: used only inside decltype() to deduce the
  // return type of S::Enter below.
  template <class S, typename R, typename... As>
  auto ReturnTypeHelper(R (S::*)(As...)) -> R;

  // Report an assertion failure naming the current state and call site, then
  // crash the process.
  void Crash(const char* aReason, const char* aSite) {
    char buf[1024];
    SprintfLiteral(buf, "%s state=%s callsite=%s", aReason,
                   ToStateStr(GetState()), aSite);
    MOZ_ReportAssertionFailure(buf, __FILE__, __LINE__);
    MOZ_CRASH();
  }

 protected:
  enum class EventVisibility : int8_t { Observable, Suppressed };

  using Master = MediaDecoderStateMachine;
  explicit StateObject(Master* aPtr) : mMaster(aPtr) {}
  // Convenience accessors forwarding to the owning state machine.
  TaskQueue* OwnerThread() const { return mMaster->mTaskQueue; }
  ReaderProxy* Reader() const { return mMaster->mReader; }
  const MediaInfo& Info() const { return mMaster->Info(); }
  MediaQueue<AudioData>& AudioQueue() const { return mMaster->mAudioQueue; }
  MediaQueue<VideoData>& VideoQueue() const { return mMaster->mVideoQueue; }

  // Expands the tuple of copied arguments into S::Enter(...); see SetState()
  // for why the arguments were copied into a tuple first.
  template <class S, typename... Args, size_t... Indexes>
  auto CallEnterMemberFunction(S* aS, std::tuple<Args...>& aTuple,
                               std::index_sequence<Indexes...>)
      -> decltype(ReturnTypeHelper(&S::Enter)) {
    AUTO_PROFILER_LABEL("StateObject::CallEnterMemberFunction", MEDIA_PLAYBACK);
    return aS->Enter(std::move(std::get<Indexes>(aTuple))...);
  }

  // Note this function will delete the current state object.
  // Don't access members to avoid UAF after this call.
  template <class S, typename... Ts>
  auto SetState(Ts&&... aArgs) -> decltype(ReturnTypeHelper(&S::Enter)) {
    // |aArgs| must be passed by reference to avoid passing MOZ_NON_PARAM class
    // SeekJob by value.  See bug 1287006 and bug 1338374.  But we still *must*
    // copy the parameters, because |Exit()| can modify them.  See bug 1312321.
    // So we 1) pass the parameters by reference, but then 2) immediately copy
    // them into a Tuple to be safe against modification, and finally 3) move
    // the elements of the Tuple into the final function call.
    auto copiedArgs = std::make_tuple(std::forward<Ts>(aArgs)...);

    // Copy mMaster which will reset to null.
    auto* master = mMaster;

    auto* s = new S(master);

    // It's possible to seek again during seeking, otherwise the new state
    // should always be different from the original one.
    MOZ_ASSERT(GetState() != s->GetState() ||
               GetState() == DECODER_STATE_SEEKING_ACCURATE ||
               GetState() == DECODER_STATE_SEEKING_FROMDORMANT ||
               GetState() == DECODER_STATE_SEEKING_NEXTFRAMESEEKING ||
               GetState() == DECODER_STATE_SEEKING_VIDEOONLY);

    SLOG("change state to: %s", ToStateStr(s->GetState()));
    PROFILER_MARKER_TEXT("MDSM::StateChange", MEDIA_PLAYBACK, {},
                         nsPrintfCString("%s", ToStateStr(s->GetState())));

    Exit();

    // Delete the old state asynchronously to avoid UAF if the caller tries to
    // access its members after SetState() returns.
    master->OwnerThread()->DispatchDirectTask(
        NS_NewRunnableFunction("MDSM::StateObject::DeleteOldState",
                               [toDelete = std::move(master->mStateObj)]() {}));
    // Also reset mMaster to catch potential UAF.
    mMaster = nullptr;

    master->mStateObj.reset(s);
    return CallEnterMemberFunction(s, copiedArgs,
                                   std::index_sequence_for<Ts...>{});
  }

  RefPtr<MediaDecoder::SeekPromise> SetSeekingState(
      SeekJob&& aSeekJob, EventVisibility aVisibility);

  void SetDecodingState();

  // Take a raw pointer in order not to change the life cycle of MDSM.
  // It is guaranteed to be valid by MDSM.
  Master* mMaster;
};

/**
 * Purpose: decode metadata like duration and dimensions of the media resource.
 *
 * Transition to other states when decoding metadata is done:
 *   SHUTDOWN if failing to decode metadata.
 *   DECODING_FIRSTFRAME otherwise.
 */

class MediaDecoderStateMachine::DecodeMetadataState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit DecodeMetadataState(Master* aPtr) : StateObject(aPtr) {}

  void Enter() {
    MOZ_ASSERT(!mMaster->mVideoDecodeSuspended);
    MOZ_ASSERT(!mMetadataRequest.Exists());
    SLOG("Dispatching AsyncReadMetadata");

    // We disconnect mMetadataRequest in Exit() so it is fine to capture
    // a raw pointer here.
    Reader()
        ->ReadMetadata()
        ->Then(
            OwnerThread(), __func__,
            [this](MetadataHolder&& aMetadata) {
              OnMetadataRead(std::move(aMetadata));
            },
            [this](const MediaResult& aError) { OnMetadataNotRead(aError); })
        ->Track(mMetadataRequest);
  }

  void Exit() override { mMetadataRequest.DisconnectIfExists(); }

  State GetState() const override { return DECODER_STATE_DECODING_METADATA; }

  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override {
    MOZ_DIAGNOSTIC_CRASH("Can't seek while decoding metadata.");
    return MediaDecoder::SeekPromise::CreateAndReject(true, __func__);
  }

  void HandleVideoSuspendTimeout() override {
    // Do nothing since no decoders are created yet.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // We never suspend video decoding in this state.
    MOZ_ASSERT(false"Shouldn't have suspended video decoding.");
  }

 private:
  void OnMetadataRead(MetadataHolder&& aMetadata);

  void OnMetadataNotRead(const MediaResult& aError) {
    AUTO_PROFILER_LABEL("DecodeMetadataState::OnMetadataNotRead",
                        MEDIA_PLAYBACK);

    mMetadataRequest.Complete();
    SLOGE("Decode metadata failed, shutting down decoder");
    mMaster->DecodeError(aError);
  }

  MozPromiseRequestHolder<MediaFormatReader::MetadataPromise> mMetadataRequest;
};

/**
 * Purpose: release decoder resources to save memory and hardware resources.
 *
 * Transition to:
 *   SEEKING if any seek request or play state changes to PLAYING.
 */

class MediaDecoderStateMachine::DormantState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit DormantState(Master* aPtr) : StateObject(aPtr) {}

  void Enter() {
    if (mMaster->IsPlaying()) {
      mMaster->StopPlayback();
    }

    // Calculate the position to seek to when exiting dormant.
    auto t = mMaster->mMediaSink->IsStarted() ? mMaster->GetClock()
                                              : mMaster->GetMediaTime();
    mMaster->AdjustByLooping(t);
    mPendingSeek.mTarget.emplace(t, SeekTarget::Accurate);
    // SeekJob asserts |mTarget.IsValid() == !mPromise.IsEmpty()| so we
    // need to create the promise even it is not used at all.
    // The promise may be used when coming out of DormantState into
    // SeekingState.
    RefPtr<MediaDecoder::SeekPromise> x =
        mPendingSeek.mPromise.Ensure(__func__);

    // Reset the decoding state to ensure that any queued video frames are
    // released and don't consume video memory.
    mMaster->ResetDecode();

    // No need to call StopMediaSink() here.
    // We will do it during seeking when exiting dormant.

    // Ignore WAIT_FOR_DATA since we won't decode in dormant.
    mMaster->mAudioWaitRequest.DisconnectIfExists();
    mMaster->mVideoWaitRequest.DisconnectIfExists();

    MaybeReleaseResources();
  }

  void Exit() override {
    // mPendingSeek is either moved when exiting dormant or
    // should be rejected here before transition to SHUTDOWN.
    mPendingSeek.RejectIfExists(__func__);
  }

  State GetState() const override { return DECODER_STATE_DORMANT; }

  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override;

  void HandleVideoSuspendTimeout() override {
    // Do nothing since we've released decoders in Enter().
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // Do nothing since we won't resume decoding until exiting dormant.
  }

  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override;

  void HandleAudioDecoded(AudioData*) override { MaybeReleaseResources(); }
  void HandleVideoDecoded(VideoData*) override { MaybeReleaseResources(); }
  void HandleWaitingForAudio() override { MaybeReleaseResources(); }
  void HandleWaitingForVideo() override { MaybeReleaseResources(); }
  void HandleAudioCanceled() override { MaybeReleaseResources(); }
  void HandleVideoCanceled() override { MaybeReleaseResources(); }
  void HandleEndOfAudio() override { MaybeReleaseResources(); }
  void HandleEndOfVideo() override { MaybeReleaseResources(); }

 private:
  void MaybeReleaseResources() {
    if (!mMaster->mAudioDataRequest.Exists() &&
        !mMaster->mVideoDataRequest.Exists()) {
      // Release decoders only when they are idle. Otherwise it might cause
      // decode error later when resetting decoders during seeking.
      mMaster->mReader->ReleaseResources();
    }
  }

  SeekJob mPendingSeek;
};

/**
 * Purpose: decode the 1st audio and video frames to fire the 'loadeddata'
 * event.
 *
 * Transition to:
 *   SHUTDOWN if any decode error.
 *   SEEKING if any seek request.
 *   DECODING/LOOPING_DECODING when the 'loadeddata' event is fired.
 */

class MediaDecoderStateMachine::DecodingFirstFrameState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit DecodingFirstFrameState(Master* aPtr) : StateObject(aPtr) {}

  void Enter();

  void Exit() override {
    // mPendingSeek is either moved in MaybeFinishDecodeFirstFrame()
    // or should be rejected here before transition to SHUTDOWN.
    mPendingSeek.RejectIfExists(__func__);
  }

  State GetState() const override { return DECODER_STATE_DECODING_FIRSTFRAME; }

  void HandleAudioDecoded(AudioData* aAudio) override {
    mMaster->PushAudio(aAudio);
    MaybeFinishDecodeFirstFrame();
  }

  void HandleVideoDecoded(VideoData* aVideo) override {
    mMaster->PushVideo(aVideo);
    MaybeFinishDecodeFirstFrame();
  }

  void HandleWaitingForAudio() override {
    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
  }

  void HandleAudioCanceled() override { mMaster->RequestAudioData(); }

  void HandleEndOfAudio() override {
    AudioQueue().Finish();
    MaybeFinishDecodeFirstFrame();
  }

  void HandleWaitingForVideo() override {
    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
  }

  void HandleVideoCanceled() override {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleEndOfVideo() override {
    VideoQueue().Finish();
    MaybeFinishDecodeFirstFrame();
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    mMaster->RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    mMaster->RequestVideoData(media::TimeUnit());
  }

  void HandleVideoSuspendTimeout() override {
    // Do nothing for we need to decode the 1st video frame to get the
    // dimensions.
  }

  void HandleResumeVideoDecoding(const TimeUnit&) override {
    // We never suspend video decoding in this state.
    MOZ_ASSERT(false"Shouldn't have suspended video decoding.");
  }

  RefPtr<MediaDecoder::SeekPromise> HandleSeek(
      const SeekTarget& aTarget) override {
    if (mMaster->mIsMSE) {
      return StateObject::HandleSeek(aTarget);
    }
    // Delay seek request until decoding first frames for non-MSE media.
    SLOG("Not Enough Data to seek at this stage, queuing seek");
    mPendingSeek.RejectIfExists(__func__);
    mPendingSeek.mTarget.emplace(aTarget);
    return mPendingSeek.mPromise.Ensure(__func__);
  }

 private:
  // Notify FirstFrameLoaded if having decoded first frames and
  // transition to SEEKING if there is any pending seek, or DECODING otherwise.
  void MaybeFinishDecodeFirstFrame();

  SeekJob mPendingSeek;
};

/**
 * Purpose: decode audio/video data for playback.
 *
 * Transition to:
 *   DORMANT if playback is paused for a while.
 *   SEEKING if any seek request.
 *   SHUTDOWN if any decode error.
 *   BUFFERING if playback can't continue due to lack of decoded data.
 *   COMPLETED when having decoded all audio/video data.
 *   LOOPING_DECODING when media start seamless looping
 */

class MediaDecoderStateMachine::DecodingState
    : public MediaDecoderStateMachine::StateObject {
 public:
  explicit DecodingState(Master* aPtr)
      : StateObject(aPtr), mDormantTimer(OwnerThread()) {}

  void Enter();

  void Exit() override {
    if (!mDecodeStartTime.IsNull()) {
      TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime;
      SLOG("Exiting DECODING, decoded for %.3lfs", decodeDuration.ToSeconds());
    }
    mDormantTimer.Reset();
    mOnAudioPopped.DisconnectIfExists();
    mOnVideoPopped.DisconnectIfExists();
  }

  void Step() override;

  State GetState() const override { return DECODER_STATE_DECODING; }

  void HandleAudioDecoded(AudioData* aAudio) override {
    mMaster->PushAudio(aAudio);
    DispatchDecodeTasksIfNeeded();
    MaybeStopPrerolling();
  }

  void HandleVideoDecoded(VideoData* aVideo) override {
    // Track whether video decode is falling behind the clock.
    // We only do this check when we're not looping, which can be known by
    // checking the queue's offset.
    const auto currentTime = mMaster->GetMediaTime();
    if (aVideo->GetEndTime() < currentTime &&
        VideoQueue().GetOffset() == media::TimeUnit::Zero()) {
      // Record when the video first started being late; used by
      // ShouldRequestNextKeyFrame() to decide when to skip ahead.
      if (!mVideoFirstLateTime) {
        mVideoFirstLateTime = Some(TimeStamp::Now());
      }
      PROFILER_MARKER("Video falling behind", MEDIA_PLAYBACK, {},
                      VideoFallingBehindMarker, aVideo->mTime.ToMicroseconds(),
                      currentTime.ToMicroseconds());
      SLOG("video %" PRId64 " starts being late (current=%" PRId64 ")",
           aVideo->mTime.ToMicroseconds(), currentTime.ToMicroseconds());
    } else {
      mVideoFirstLateTime.reset();
    }
    mMaster->PushVideo(aVideo);
    DispatchDecodeTasksIfNeeded();
    MaybeStopPrerolling();
  }

  void HandleAudioCanceled() override { mMaster->RequestAudioData(); }

  void HandleVideoCanceled() override {
    mMaster->RequestVideoData(mMaster->GetMediaTime(),
                              ShouldRequestNextKeyFrame());
  }

  void HandleEndOfAudio() override;
  void HandleEndOfVideo() override;

  void HandleWaitingForAudio() override {
    mMaster->WaitForData(MediaData::Type::AUDIO_DATA);
    MaybeStopPrerolling();
  }

  void HandleWaitingForVideo() override {
    mMaster->WaitForData(MediaData::Type::VIDEO_DATA);
    MaybeStopPrerolling();
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    mMaster->RequestAudioData();
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    mMaster->RequestVideoData(mMaster->GetMediaTime(),
                              ShouldRequestNextKeyFrame());
  }

  void HandleAudioCaptured() override {
    MaybeStopPrerolling();
    // MediaSink is changed. Schedule Step() to check if we can start playback.
    mMaster->ScheduleStateMachine();
  }

  void HandleVideoSuspendTimeout() override {
    // No video, so nothing to suspend.
    if (!mMaster->HasVideo()) {
      return;
    }

    PROFILER_MARKER_UNTYPED("MDSM::EnterVideoSuspend", MEDIA_PLAYBACK);
    mMaster->mVideoDecodeSuspended = true;
    mMaster->mOnPlaybackEvent.Notify(MediaPlaybackEvent::EnterVideoSuspend);
    Reader()->SetVideoBlankDecode(true);
  }

  void HandlePlayStateChanged(MediaDecoder::PlayState aPlayState) override {
    // Schedule Step() to check if we can start or stop playback.
    mMaster->ScheduleStateMachine();
    if (aPlayState == MediaDecoder::PLAY_STATE_PLAYING) {
      // Try to dispatch decoding tasks for mMinimizePreroll might be reset.
      DispatchDecodeTasksIfNeeded();
    }

    if (aPlayState == MediaDecoder::PLAY_STATE_PAUSED) {
      // Pausing may eventually put us in DORMANT; start the countdown.
      StartDormantTimer();
      mVideoFirstLateTime.reset();
    } else {
      mDormantTimer.Reset();
    }
  }

  void GetDebugInfo(
      dom::MediaDecoderStateMachineDecodingStateDebugInfo& aInfo) override {
    aInfo.mIsPrerolling = mIsPrerolling;
  }

  void HandleLoopingChanged() override { SetDecodingState(); }

 protected:
  virtual void EnsureAudioDecodeTaskQueued();
  virtual void EnsureVideoDecodeTaskQueued();

  // Prerolling ends once both tracks are either done prerolling or blocked
  // waiting for data.
  virtual bool ShouldStopPrerolling() const {
    return mIsPrerolling &&
           (DonePrerollingAudio() ||
            IsWaitingData(MediaData::Type::AUDIO_DATA)) &&
           (DonePrerollingVideo() ||
            IsWaitingData(MediaData::Type::VIDEO_DATA));
  }

  virtual bool IsWaitingData(MediaData::Type aType) const {
    if (aType == MediaData::Type::AUDIO_DATA) {
      return mMaster->IsWaitingAudioData();
    }
    MOZ_ASSERT(aType == MediaData::Type::VIDEO_DATA);
    return mMaster->IsWaitingVideoData();
  }

  void MaybeStopPrerolling() {
    if (ShouldStopPrerolling()) {
      mIsPrerolling = false;
      // Check if we can start playback.
      mMaster->ScheduleStateMachine();
    }
  }

  // Returns true when video has been continuously late for longer than the
  // skip-when-video-too-slow pref allows, i.e. we should ask the reader for
  // the next keyframe instead of decoding every frame.
  bool ShouldRequestNextKeyFrame() const {
    if (!mVideoFirstLateTime) {
      return false;
    }
    const double elapsedTimeMs =
        (TimeStamp::Now() - *mVideoFirstLateTime).ToMilliseconds();
    const bool rv = elapsedTimeMs >=
                    StaticPrefs::media_decoder_skip_when_video_too_slow_ms();
    if (rv) {
      PROFILER_MARKER_UNTYPED("Skipping to next keyframe", MEDIA_PLAYBACK);
      SLOG(
          "video has been late behind media time for %f ms, should skip to "
          "next key frame",
          elapsedTimeMs);
    }
    return rv;
  }

  virtual bool IsBufferingAllowed() const { return true; }

 private:
  void DispatchDecodeTasksIfNeeded();
  void MaybeStartBuffering();

  // At the start of decoding we want to "preroll" the decode until we've
  // got a few frames decoded before we consider whether decode is falling
  // behind. Otherwise our "we're falling behind" logic will trigger
  // unnecessarily if we start playing as soon as the first sample is
  // decoded. These two fields store how many video frames and audio
  // samples we must consume before are considered to be finished prerolling.
  TimeUnit AudioPrerollThreshold() const {
    return (mMaster->mAmpleAudioThreshold / 2)
        .MultDouble(mMaster->mPlaybackRate);
  }

  uint32_t VideoPrerollFrames() const {
    return std::min(
        static_cast<uint32_t>(
            mMaster->GetAmpleVideoFrames() / 2. * mMaster->mPlaybackRate + 1),
        sVideoQueueDefaultSize);
  }

  bool DonePrerollingAudio() const {
    return !mMaster->IsAudioDecoding() ||
           mMaster->GetDecodedAudioDuration() >= AudioPrerollThreshold();
  }

  bool DonePrerollingVideo() const {
    return !mMaster->IsVideoDecoding() ||
           static_cast<uint32_t>(mMaster->VideoQueue().GetSize()) >=
               VideoPrerollFrames();
  }

  // Arms (or fires immediately) the timer that moves us to DORMANT after
  // playback has been paused for the configured timeout.
  void StartDormantTimer() {
    if (!mMaster->mMediaSeekable) {
      // Don't enter dormant if the media is not seekable because we need to
      // seek when exiting dormant.
      return;
    }

    auto timeout = StaticPrefs::media_dormant_on_pause_timeout_ms();
    if (timeout < 0) {
      // Disabled when timeout is negative.
      return;
    }

    if (timeout == 0) {
      // Enter dormant immediately without scheduling a timer.
      SetState<DormantState>();
      return;
    }

    if (mMaster->mMinimizePreroll) {
      SetState<DormantState>();
      return;
    }

    TimeStamp target =
        TimeStamp::Now() + TimeDuration::FromMilliseconds(timeout);

    mDormantTimer.Ensure(
        target,
        [this]() {
          AUTO_PROFILER_LABEL("DecodingState::StartDormantTimer:SetDormant",
                              MEDIA_PLAYBACK);
          mDormantTimer.CompleteRequest();
          SetState<DormantState>();
        },
        [this]() { mDormantTimer.CompleteRequest(); });
  }

  // Time at which we started decoding.
  TimeStamp mDecodeStartTime;

  // When we start decoding (either for the first time, or after a pause)
  // we may be low on decoded data. We don't want our "low data" logic to
  // kick in and decide that we're low on decoded data because the download
  // can't keep up with the decode, and cause us to pause playback. So we
  // have a "preroll" stage, where we ignore the results of our "low data"
  // logic during the first few frames of our decode. This occurs during
  // playback.
  bool mIsPrerolling = true;

  // Fired when playback is paused for a while to enter dormant.
  DelayedScheduler<TimeStamp> mDormantTimer;

  MediaEventListener mOnAudioPopped;
  MediaEventListener mOnVideoPopped;

  // If video has been late relative to the media time, this records when the
  // video started being late. It is reset once video catches up with the
  // media time.
  Maybe<TimeStamp> mVideoFirstLateTime;
};

/**
 * Purpose: decode audio data for playback when media is in seamless
 * looping; we adjust the media time to keep sample times monotonically
 * increasing. All its methods run on their owner thread (the MDSM thread).
 *
 * Transition to:
 *   DORMANT if playback is paused for a while.
 *   SEEKING if any seek request.
 *   SHUTDOWN if any decode error.
 *   BUFFERING if playback can't continue due to lack of decoded data.
 *   COMPLETED when the media resource is closed and no data is available
 *             anymore.
 *   DECODING when media stops seamless looping.
 */

class MediaDecoderStateMachine::LoopingDecodingState
    : public MediaDecoderStateMachine::DecodingState {
 public:
  // Entered only when seamless looping is enabled (asserted below). Records
  // which tracks were already at EOS on entry (IsXXXDecoding() is false once a
  // track reached EOS), captures per-track decoded durations when they can be
  // known already, and shifts media-queue offsets if at least one loop has
  // already completed, keeping sample times consistent with the clock.
  explicit LoopingDecodingState(Master* aPtr)
      : DecodingState(aPtr),
        mIsReachingAudioEOS(!mMaster->IsAudioDecoding()),
        mIsReachingVideoEOS(!mMaster->IsVideoDecoding()),
        mAudioEndedBeforeEnteringStateWithoutDuration(false),
        mVideoEndedBeforeEnteringStateWithoutDuration(false) {
    MOZ_ASSERT(mMaster->mLooping);
    SLOG(
        "LoopingDecodingState ctor, mIsReachingAudioEOS=%d, "
        "mIsReachingVideoEOS=%d",
        mIsReachingAudioEOS, mIsReachingVideoEOS);
    // If the track has reached EOS and we already have its last data, then we
    // can know its duration. But if playback starts from EOS (due to seeking),
    // the decoded end time would be zero because none of data gets decoded yet.
    if (mIsReachingAudioEOS) {
      if (mMaster->HasLastDecodedData(MediaData::Type::AUDIO_DATA) &&
          !mMaster->mAudioTrackDecodedDuration) {
        mMaster->mAudioTrackDecodedDuration.emplace(
            mMaster->mDecodedAudioEndTime);
        SLOG("determine mAudioTrackDecodedDuration");
      } else {
        // Remember that the track ended with an unknown duration so
        // DetermineOriginalDecodedDurationIfNeeded() can resolve it once the
        // other track's duration becomes known.
        mAudioEndedBeforeEnteringStateWithoutDuration = true;
        SLOG("still don't know mAudioTrackDecodedDuration");
      }
    }

    if (mIsReachingVideoEOS) {
      if (mMaster->HasLastDecodedData(MediaData::Type::VIDEO_DATA) &&
          !mMaster->mVideoTrackDecodedDuration) {
        mMaster->mVideoTrackDecodedDuration.emplace(
            mMaster->mDecodedVideoEndTime);
        SLOG("determine mVideoTrackDecodedDuration");
      } else {
        mVideoEndedBeforeEnteringStateWithoutDuration = true;
        SLOG("still don't know mVideoTrackDecodedDuration");
      }
    }

    // We might be able to determine the duration already, let's check.
    if (mIsReachingAudioEOS || mIsReachingVideoEOS) {
      Unused << DetermineOriginalDecodedDurationIfNeeded();
    }

    // If we've looped at least once before, then we need to update queue offset
    // correctly to make the media data time and the clock time consistent.
    // Otherwise, it would cause a/v desync.
    if (mMaster->mOriginalDecodedDuration != media::TimeUnit::Zero()) {
      if (mIsReachingAudioEOS && mMaster->HasAudio()) {
        AudioQueue().SetOffset(AudioQueue().GetOffset() +
                               mMaster->mOriginalDecodedDuration);
      }
      if (mIsReachingVideoEOS && mMaster->HasVideo()) {
        VideoQueue().SetOffset(VideoQueue().GetOffset() +
                               mMaster->mOriginalDecodedDuration);
      }
    }
  }

  void Enter() {
    if (mMaster->HasAudio() && mIsReachingAudioEOS) {
      SLOG("audio has ended, request the data again.");
      RequestDataFromStartPosition(TrackInfo::TrackType::kAudioTrack);
    }
    if (mMaster->HasVideo() && mIsReachingVideoEOS) {
      SLOG("video has ended, request the data again.");
      RequestDataFromStartPosition(TrackInfo::TrackType::kVideoTrack);
    }
    DecodingState::Enter();
  }

  // Tears down looping bookkeeping when leaving the state: discards queued
  // data belonging to a loop iteration that will never play, marks queues
  // finished when all required data was received, and disconnects every
  // in-flight request so the destructor's assertions hold.
  void Exit() override {
    MOZ_DIAGNOSTIC_ASSERT(mMaster->OnTaskQueue());
    SLOG("Leaving looping state, offset [a=%" PRId64 ",v=%" PRId64
         "], endtime [a=%" PRId64 ",v=%" PRId64 "], track duration [a=%" PRId64
         ",v=%" PRId64 "], waiting=%s",
         AudioQueue().GetOffset().ToMicroseconds(),
         VideoQueue().GetOffset().ToMicroseconds(),
         mMaster->mDecodedAudioEndTime.ToMicroseconds(),
         mMaster->mDecodedVideoEndTime.ToMicroseconds(),
         mMaster->mAudioTrackDecodedDuration
             ? mMaster->mAudioTrackDecodedDuration->ToMicroseconds()
             : 0,
         mMaster->mVideoTrackDecodedDuration
             ? mMaster->mVideoTrackDecodedDuration->ToMicroseconds()
             : 0,
         mDataWaitingTimestampAdjustment
             ? MediaData::EnumValueToString(
                   mDataWaitingTimestampAdjustment->mType)
             : "none");
    if (ShouldDiscardLoopedData(MediaData::Type::AUDIO_DATA)) {
      DiscardLoopedData(MediaData::Type::AUDIO_DATA);
    }
    if (ShouldDiscardLoopedData(MediaData::Type::VIDEO_DATA)) {
      DiscardLoopedData(MediaData::Type::VIDEO_DATA);
    }

    if (mMaster->HasAudio() && HasDecodedLastAudioFrame()) {
      SLOG("Mark audio queue as finished");
      mMaster->mAudioDataRequest.DisconnectIfExists();
      mMaster->mAudioWaitRequest.DisconnectIfExists();
      AudioQueue().Finish();
    }
    if (mMaster->HasVideo() && HasDecodedLastVideoFrame()) {
      SLOG("Mark video queue as finished");
      mMaster->mVideoDataRequest.DisconnectIfExists();
      mMaster->mVideoWaitRequest.DisconnectIfExists();
      VideoQueue().Finish();
    }

    // Clear waiting data should be done after marking queue as finished,
    // because HasDecodedLastXXXFrame() above consults the waiting sample.
    mDataWaitingTimestampAdjustment = nullptr;

    mAudioDataRequest.DisconnectIfExists();
    mVideoDataRequest.DisconnectIfExists();
    mAudioSeekRequest.DisconnectIfExists();
    mVideoSeekRequest.DisconnectIfExists();
    DecodingState::Exit();
  }

  // All in-flight seek/data requests must have been disconnected by Exit()
  // before the state object is destroyed.
  ~LoopingDecodingState() {
    MOZ_DIAGNOSTIC_ASSERT(!mAudioDataRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mVideoDataRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mAudioSeekRequest.Exists());
    MOZ_DIAGNOSTIC_ASSERT(!mVideoSeekRequest.Exists());
  }

  State GetState() const override { return DECODER_STATE_LOOPING_DECODING; }

  void HandleAudioDecoded(AudioData* aAudio) override {
    // TODO : check if we need to update mOriginalDecodedDuration

    // After pushing data to the queue, timestamp might be adjusted.
    DecodingState::HandleAudioDecoded(aAudio);
    mMaster->mDecodedAudioEndTime =
        std::max(aAudio->GetEndTime(), mMaster->mDecodedAudioEndTime);
    SLOG("audio sample after time-adjustment [%" PRId64 ",%" PRId64 "]",
         aAudio->mTime.ToMicroseconds(), aAudio->GetEndTime().ToMicroseconds());
  }

  void HandleVideoDecoded(VideoData* aVideo) override {
    // TODO : check if we need to update mOriginalDecodedDuration

    // Here sample still keeps its original timestamp.

    // This indicates there is a shorter audio track, and it's the first time in
    // the looping (audio ends but video is playing) so that we haven't been
    // able to determine the decoded duration. Therefore, we fill the gap
    // between two tracks before video ends. Afterward, this adjustment will be
    // done in `HandleEndOfAudio()`.
    if (mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero() &&
        mMaster->mAudioTrackDecodedDuration &&
        aVideo->GetEndTime() > *mMaster->mAudioTrackDecodedDuration) {
      media::TimeUnit gap;
      // First time we fill gap between the video frame to the last audio.
      if (auto prevVideo = VideoQueue().PeekBack();
          prevVideo &&
          prevVideo->GetEndTime() < *mMaster->mAudioTrackDecodedDuration) {
        gap =
            aVideo->GetEndTime().ToBase(*mMaster->mAudioTrackDecodedDuration) -
            *mMaster->mAudioTrackDecodedDuration;
      }
      // Then fill the gap for all following videos.
      else {
        gap = aVideo->mDuration.ToBase(*mMaster->mAudioTrackDecodedDuration);
      }
      SLOG("Longer video %" PRId64 "%s (audio-durtaion=%" PRId64
           "%s), insert silence to fill the gap %" PRId64 "%s",
           aVideo->GetEndTime().ToMicroseconds(),
           aVideo->GetEndTime().ToString().get(),
           mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
           mMaster->mAudioTrackDecodedDuration->ToString().get(),
           gap.ToMicroseconds(), gap.ToString().get());
      PushFakeAudioDataIfNeeded(gap);
    }

    // After pushing data to the queue, timestamp might be adjusted.
    DecodingState::HandleVideoDecoded(aVideo);
    mMaster->mDecodedVideoEndTime =
        std::max(aVideo->GetEndTime(), mMaster->mDecodedVideoEndTime);
    SLOG("video sample after time-adjustment [%" PRId64 ",%" PRId64 "]",
         aVideo->mTime.ToMicroseconds(), aVideo->GetEndTime().ToMicroseconds());
  }

  // Audio hit EOS while looping: finalize the audio track's decoded duration,
  // shift the queue offset for the next loop iteration once the full loop
  // duration is known, pad with silence if audio is shorter than the loop,
  // and start the seek back to position zero.
  void HandleEndOfAudio() override {
    mIsReachingAudioEOS = true;
    if (!mMaster->mAudioTrackDecodedDuration &&
        mMaster->HasLastDecodedData(MediaData::Type::AUDIO_DATA)) {
      mMaster->mAudioTrackDecodedDuration.emplace(
          mMaster->mDecodedAudioEndTime);
    }
    // Once the full loop duration is known, advance the queue offset so
    // samples of the next iteration get consistently adjusted timestamps.
    if (DetermineOriginalDecodedDurationIfNeeded()) {
      AudioQueue().SetOffset(AudioQueue().GetOffset() +
                             mMaster->mOriginalDecodedDuration);
    }

    // This indicates that the audio track is shorter than the video track, so
    // we need to add some silence to fill the gap.
    if (mMaster->mAudioTrackDecodedDuration &&
        mMaster->mOriginalDecodedDuration >
            *mMaster->mAudioTrackDecodedDuration) {
      MOZ_ASSERT(mMaster->HasVideo());
      MOZ_ASSERT(mMaster->mVideoTrackDecodedDuration);
      MOZ_ASSERT(mMaster->mOriginalDecodedDuration ==
                 *mMaster->mVideoTrackDecodedDuration);
      auto gap = mMaster->mOriginalDecodedDuration.ToBase(
                     *mMaster->mAudioTrackDecodedDuration) -
                 *mMaster->mAudioTrackDecodedDuration;
      SLOG(
          "Audio track is shorter than the original decoded duration "
          "(a=%" PRId64 "%s, t=%" PRId64
          "%s), insert silence to fill the gap %" PRId64 "%s",
          mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
          mMaster->mAudioTrackDecodedDuration->ToString().get(),
          mMaster->mOriginalDecodedDuration.ToMicroseconds(),
          mMaster->mOriginalDecodedDuration.ToString().get(),
          gap.ToMicroseconds(), gap.ToString().get());
      PushFakeAudioDataIfNeeded(gap);
    }

    // NOTE(review): the log below dereferences mAudioTrackDecodedDuration
    // unconditionally; it looks like it is always set by this point when audio
    // data was ever decoded — confirm for the seek-straight-to-EOS path where
    // HasLastDecodedData() is false.
    SLOG(
        "received audio EOS when seamless looping, starts seeking, "
        "audioLoopingOffset=[%" PRId64 "], mAudioTrackDecodedDuration=[%" PRId64
        "]",
        AudioQueue().GetOffset().ToMicroseconds(),
        mMaster->mAudioTrackDecodedDuration->ToMicroseconds());
    if (!IsRequestingDataFromStartPosition(MediaData::Type::AUDIO_DATA)) {
      RequestDataFromStartPosition(TrackInfo::TrackType::kAudioTrack);
    }
    ProcessSamplesWaitingAdjustmentIfAny();
  }

  // Video hit EOS while looping: finalize the video track's decoded duration,
  // shift the queue offset once the full loop duration is known, and start the
  // seek back to position zero.
  void HandleEndOfVideo() override {
    mIsReachingVideoEOS = true;
    if (!mMaster->mVideoTrackDecodedDuration &&
        mMaster->HasLastDecodedData(MediaData::Type::VIDEO_DATA)) {
      mMaster->mVideoTrackDecodedDuration.emplace(
          mMaster->mDecodedVideoEndTime);
    }
    if (DetermineOriginalDecodedDurationIfNeeded()) {
      VideoQueue().SetOffset(VideoQueue().GetOffset() +
                             mMaster->mOriginalDecodedDuration);
    }

    // NOTE(review): like the audio path, this log dereferences
    // mVideoTrackDecodedDuration unconditionally — confirm it is always set
    // here when the seek-straight-to-EOS path is taken.
    SLOG(
        "received video EOS when seamless looping, starts seeking, "
        "videoLoopingOffset=[%" PRId64 "], mVideoTrackDecodedDuration=[%" PRId64
        "]",
        VideoQueue().GetOffset().ToMicroseconds(),
        mMaster->mVideoTrackDecodedDuration->ToMicroseconds());
    if (!IsRequestingDataFromStartPosition(MediaData::Type::VIDEO_DATA)) {
      RequestDataFromStartPosition(TrackInfo::TrackType::kVideoTrack);
    }
    ProcessSamplesWaitingAdjustmentIfAny();
  }

 private:
  void RequestDataFromStartPosition(TrackInfo::TrackType aType) {
    MOZ_DIAGNOSTIC_ASSERT(aType == TrackInfo::TrackType::kAudioTrack ||
                          aType == TrackInfo::TrackType::kVideoTrack);

    const bool isAudio = aType == TrackInfo::TrackType::kAudioTrack;
    MOZ_ASSERT_IF(isAudio, mMaster->HasAudio());
    MOZ_ASSERT_IF(!isAudio, mMaster->HasVideo());

    if (IsReaderSeeking()) {
      MOZ_ASSERT(!mPendingSeekingType);
      mPendingSeekingType = Some(aType);
      SLOG("Delay %s seeking until the reader finishes current seeking",
           isAudio ? "audio" : "video");
      return;
    }

    auto& seekRequest = isAudio ? mAudioSeekRequest : mVideoSeekRequest;
    Reader()->ResetDecode(aType);
    Reader()
        ->Seek(SeekTarget(media::TimeUnit::Zero(), SeekTarget::Type::Accurate,
                          isAudio ? SeekTarget::Track::AudioOnly
                                  : SeekTarget::Track::VideoOnly))
        ->Then(
            OwnerThread(), __func__,
            [this, isAudio, master = RefPtr{mMaster}]() mutable -> void {
              AUTO_PROFILER_LABEL(
                  nsPrintfCString(
                      "LoopingDecodingState::RequestDataFromStartPosition(%s)::"
                      "SeekResolved",
                      isAudio ? "audio" : "video")
                      .get(),
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false"This shouldn't happen!");
                return;
              }
              if (isAudio) {
                mAudioSeekRequest.Complete();
              } else {
                mVideoSeekRequest.Complete();
              }
              SLOG(
                  "seeking completed, start to request first %s sample "
                  "(queued=%zu, decoder-queued=%zu)",
                  isAudio ? "audio" : "video",
                  isAudio ? AudioQueue().GetSize() : VideoQueue().GetSize(),
                  isAudio ? Reader()->SizeOfAudioQueueInFrames()
                          : Reader()->SizeOfVideoQueueInFrames());
              if (isAudio) {
                RequestAudioDataFromReaderAfterEOS();
              } else {
                RequestVideoDataFromReaderAfterEOS();
              }
              if (mPendingSeekingType) {
                auto seekingType = *mPendingSeekingType;
                mPendingSeekingType.reset();
                SLOG("Perform pending %s seeking", TrackTypeToStr(seekingType));
                RequestDataFromStartPosition(seekingType);
              }
            },
            [this, isAudio, master = RefPtr{mMaster}](
                const SeekRejectValue& aReject) mutable -> void {
              AUTO_PROFILER_LABEL(
                  nsPrintfCString("LoopingDecodingState::"
                                  "RequestDataFromStartPosition(%s)::"
                                  "SeekRejected",
                                  isAudio ? "audio" : "video")
                      .get(),
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false"This shouldn't happen!");
                return;
              }
              if (isAudio) {
                mAudioSeekRequest.Complete();
              } else {
                mVideoSeekRequest.Complete();
              }
              HandleError(aReject.mError, isAudio);
            })
        ->Track(seekRequest);
  }

  void RequestAudioDataFromReaderAfterEOS() {
    MOZ_ASSERT(mMaster->HasAudio());
    Reader()
        ->RequestAudioData()
        ->Then(
            OwnerThread(), __func__,
            [this, master = RefPtr{mMaster}](const RefPtr<AudioData>& aAudio) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestAudioDataFromReader::"
                  "RequestDataResolved",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false"This shouldn't happen!");
                return;
              }
              mIsReachingAudioEOS = false;
              mAudioDataRequest.Complete();
              SLOG(
                  "got audio decoded sample "
                  "[%" PRId64 ",%" PRId64 "]",
                  aAudio->mTime.ToMicroseconds(),
                  aAudio->GetEndTime().ToMicroseconds());
              if (ShouldPutDataOnWaiting(MediaData::Type::AUDIO_DATA)) {
                SLOG(
                    "decoded audio sample needs to wait for timestamp "
                    "adjustment after EOS");
                PutDataOnWaiting(aAudio);
                return;
              }
              HandleAudioDecoded(aAudio);
              ProcessSamplesWaitingAdjustmentIfAny();
            },
            [this, master = RefPtr{mMaster}](const MediaResult& aError) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestAudioDataFromReader::"
                  "RequestDataRejected",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false"This shouldn't happen!");
                return;
              }
              mAudioDataRequest.Complete();
              HandleError(aError, true /* isAudio */);
            })
        ->Track(mAudioDataRequest);
  }

  void RequestVideoDataFromReaderAfterEOS() {
    MOZ_ASSERT(mMaster->HasVideo());
    Reader()
        ->RequestVideoData(media::TimeUnit(),
                           false /* aRequestNextVideoKeyFrame */)
        ->Then(
            OwnerThread(), __func__,
            [this, master = RefPtr{mMaster}](const RefPtr<VideoData>& aVideo) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestVideoDataFromReaderAfterEOS()::"
                  "RequestDataResolved",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false"This shouldn't happen!");
                return;
              }
              mIsReachingVideoEOS = false;
              mVideoDataRequest.Complete();
              SLOG(
                  "got video decoded sample "
                  "[%" PRId64 ",%" PRId64 "]",
                  aVideo->mTime.ToMicroseconds(),
                  aVideo->GetEndTime().ToMicroseconds());
              if (ShouldPutDataOnWaiting(MediaData::Type::VIDEO_DATA)) {
                SLOG(
                    "decoded video sample needs to wait for timestamp "
                    "adjustment after EOS");
                PutDataOnWaiting(aVideo);
                return;
              }
              mMaster->mBypassingSkipToNextKeyFrameCheck = true;
              HandleVideoDecoded(aVideo);
              ProcessSamplesWaitingAdjustmentIfAny();
            },
            [this, master = RefPtr{mMaster}](const MediaResult& aError) {
              AUTO_PROFILER_LABEL(
                  "LoopingDecodingState::"
                  "RequestVideoDataFromReaderAfterEOS()::"
                  "RequestDataRejected",
                  MEDIA_PLAYBACK);
              if (auto& state = master->mStateObj;
                  state &&
                  state->GetState() != DECODER_STATE_LOOPING_DECODING) {
                MOZ_RELEASE_ASSERT(false"This shouldn't happen!");
                return;
              }
              mVideoDataRequest.Complete();
              HandleError(aError, false /* isAudio */);
            })
        ->Track(mVideoDataRequest);
  }

  // Handles a seek/decode failure for the given track; defined out-of-line
  // (implementation not visible in this chunk).
  void HandleError(const MediaResult& aError, bool aIsAudio);

  bool ShouldRequestData(MediaData::Type aType) const {
    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                          aType == MediaData::Type::VIDEO_DATA);

    if (aType == MediaData::Type::AUDIO_DATA &&
        (mAudioSeekRequest.Exists() || mAudioDataRequest.Exists() ||
         IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA) ||
         mMaster->IsWaitingAudioData())) {
      return false;
    }
    if (aType == MediaData::Type::VIDEO_DATA &&
        (mVideoSeekRequest.Exists() || mVideoDataRequest.Exists() ||
         IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA) ||
         mMaster->IsWaitingVideoData())) {
      return false;
    }
    return true;
  }

  void HandleAudioCanceled() override {
    if (ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
      mMaster->RequestAudioData();
    }
  }

  void HandleAudioWaited(MediaData::Type aType) override {
    if (ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
      mMaster->RequestAudioData();
    }
  }

  void HandleVideoCanceled() override {
    if (ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
      mMaster->RequestVideoData(mMaster->GetMediaTime(),
                                ShouldRequestNextKeyFrame());
    };
  }

  void HandleVideoWaited(MediaData::Type aType) override {
    if (ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
      mMaster->RequestVideoData(mMaster->GetMediaTime(),
                                ShouldRequestNextKeyFrame());
    };
  }

  void EnsureAudioDecodeTaskQueued() override {
    if (!ShouldRequestData(MediaData::Type::AUDIO_DATA)) {
      return;
    }
    DecodingState::EnsureAudioDecodeTaskQueued();
  }

  void EnsureVideoDecodeTaskQueued() override {
    if (!ShouldRequestData(MediaData::Type::VIDEO_DATA)) {
      return;
    }
    DecodingState::EnsureVideoDecodeTaskQueued();
  }

  // Tries to fix mOriginalDecodedDuration, the length of one full loop
  // iteration (the longer of the two tracks). Returns true when the duration
  // is (now) known, false while we still must wait for a track to end.
  bool DetermineOriginalDecodedDurationIfNeeded() {
    // Duration would only need to be set once, unless we get more data which is
    // larger than the duration. That can happen on MSE (reopen stream).
    if (mMaster->mOriginalDecodedDuration != media::TimeUnit::Zero()) {
      return true;
    }

    // Single track situations
    if (mMaster->HasAudio() && !mMaster->HasVideo() &&
        mMaster->mAudioTrackDecodedDuration) {
      mMaster->mOriginalDecodedDuration = *mMaster->mAudioTrackDecodedDuration;
      SLOG("audio only, duration=%" PRId64,
           mMaster->mOriginalDecodedDuration.ToMicroseconds());
      return true;
    }
    if (mMaster->HasVideo() && !mMaster->HasAudio() &&
        mMaster->mVideoTrackDecodedDuration) {
      mMaster->mOriginalDecodedDuration = *mMaster->mVideoTrackDecodedDuration;
      SLOG("video only, duration=%" PRId64,
           mMaster->mOriginalDecodedDuration.ToMicroseconds());
      return true;
    }
    // Two tracks situation
    if (mMaster->HasAudio() && mMaster->HasVideo()) {
      // Both tracks have ended so that we can check which track is longer.
      if (mMaster->mAudioTrackDecodedDuration &&
          mMaster->mVideoTrackDecodedDuration) {
        mMaster->mOriginalDecodedDuration =
            std::max(*mMaster->mVideoTrackDecodedDuration,
                     *mMaster->mAudioTrackDecodedDuration);
        SLOG("Both tracks ended, original duration=%" PRId64 " (a=%" PRId64
             ", v=%" PRId64 ")",
             mMaster->mOriginalDecodedDuration.ToMicroseconds(),
             mMaster->mAudioTrackDecodedDuration->ToMicroseconds(),
             mMaster->mVideoTrackDecodedDuration->ToMicroseconds());
        return true;
      }
      // When entering the state, video has ended but audio hasn't, which means
      // audio is longer.
      if (mMaster->mAudioTrackDecodedDuration &&
          mVideoEndedBeforeEnteringStateWithoutDuration) {
        mMaster->mOriginalDecodedDuration =
            *mMaster->mAudioTrackDecodedDuration;
        mVideoEndedBeforeEnteringStateWithoutDuration = false;
        SLOG("audio is longer, duration=%" PRId64,
             mMaster->mOriginalDecodedDuration.ToMicroseconds());
        return true;
      }
      // When entering the state, audio has ended but video hasn't, which means
      // video is longer.
      if (mMaster->mVideoTrackDecodedDuration &&
          mAudioEndedBeforeEnteringStateWithoutDuration) {
        mMaster->mOriginalDecodedDuration =
            *mMaster->mVideoTrackDecodedDuration;
        mAudioEndedBeforeEnteringStateWithoutDuration = false;
        SLOG("video is longer, duration=%" PRId64,
             mMaster->mOriginalDecodedDuration.ToMicroseconds());
        return true;
      }
      SLOG("Still waiting for another track ends...");
      MOZ_ASSERT(!mMaster->mAudioTrackDecodedDuration ||
                 !mMaster->mVideoTrackDecodedDuration);
    }
    SLOG("can't determine the original decoded duration yet");
    MOZ_ASSERT(mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero());
    return false;
  }

  void ProcessSamplesWaitingAdjustmentIfAny() {
    if (!mDataWaitingTimestampAdjustment) {
      return;
    }

    RefPtr<MediaData> data = mDataWaitingTimestampAdjustment;
    mDataWaitingTimestampAdjustment = nullptr;
    const bool isAudio = data->mType == MediaData::Type::AUDIO_DATA;
    SLOG("process %s sample waiting for timestamp adjustment",
         isAudio ? "audio" : "video");
    if (isAudio) {
      // Waiting sample is for next round of looping, so the queue offset
      // shouldn't be zero. This happens when the track has reached EOS before
      // entering the state (and looping never happens before). Same for below
      // video case.
      if (AudioQueue().GetOffset() == media::TimeUnit::Zero()) {
        AudioQueue().SetOffset(mMaster->mOriginalDecodedDuration);
      }
      HandleAudioDecoded(data->As<AudioData>());
    } else {
      MOZ_DIAGNOSTIC_ASSERT(data->mType == MediaData::Type::VIDEO_DATA);
      if (VideoQueue().GetOffset() == media::TimeUnit::Zero()) {
        VideoQueue().SetOffset(mMaster->mOriginalDecodedDuration);
      }
      HandleVideoDecoded(data->As<VideoData>());
    }
  }

  bool IsDataWaitingForTimestampAdjustment(MediaData::Type aType) const {
    return mDataWaitingTimestampAdjustment &&
           mDataWaitingTimestampAdjustment->mType == aType;
  }

  bool ShouldPutDataOnWaiting(MediaData::Type aType) const {
    // If another track is already waiting, this track shouldn't be waiting.
    // This case only happens when both tracks reached EOS before entering the
    // looping decoding state, so we don't know the decoded duration yet (used
    // to adjust timestamp) But this is fine, because both tracks will start
    // from 0 so we don't need to adjust them now.
    if (mDataWaitingTimestampAdjustment &&
        !IsDataWaitingForTimestampAdjustment(aType)) {
      return false;
    }

    // Only have one track, no need to wait.
    if ((aType == MediaData::Type::AUDIO_DATA && !mMaster->HasVideo()) ||
        (aType == MediaData::Type::VIDEO_DATA && !mMaster->HasAudio())) {
      return false;
    }

    // We don't know the duration yet, so we can't calculate the looping offset.
    return mMaster->mOriginalDecodedDuration == media::TimeUnit::Zero();
  }

  void PutDataOnWaiting(MediaData* aData) {
    MOZ_ASSERT(!mDataWaitingTimestampAdjustment);
    mDataWaitingTimestampAdjustment = aData;
    SLOG("put %s [%" PRId64 ",%" PRId64 "] on waiting",
         MediaData::EnumValueToString(aData->mType),
         aData->mTime.ToMicroseconds(), aData->GetEndTime().ToMicroseconds());
    MaybeStopPrerolling();
  }

  bool ShouldDiscardLoopedData(MediaData::Type aType) const {
    if (!mMaster->mMediaSink->IsStarted()) {
      return false;
    }

    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                          aType == MediaData::Type::VIDEO_DATA);
    const bool isAudio = aType == MediaData::Type::AUDIO_DATA;
    if (isAudio && !mMaster->HasAudio()) {
      return false;
    }
    if (!isAudio && !mMaster->HasVideo()) {
      return false;
    }

    /**
     * If media cancels looping, we should check whether there is media data
     * whose time is later than EOS. If so, we should discard them because we
     * won't have a chance to play them.
     *
     *    playback                     last decoded
     *    position          EOS        data time
     *   ----|---------------|------------|---------> (Increasing timeline)
     *    mCurrent         looping      mMaster's
     *    ClockTime        offset      mDecodedXXXEndTime
     *
     */

    const auto offset =
        isAudio ? AudioQueue().GetOffset() : VideoQueue().GetOffset();
    const auto endTime =
        isAudio ? mMaster->mDecodedAudioEndTime : mMaster->mDecodedVideoEndTime;
    const auto clockTime = mMaster->GetClock();
    return (offset != media::TimeUnit::Zero() && clockTime < offset &&
            offset < endTime);
  }

  void DiscardLoopedData(MediaData::Type aType) {
    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                          aType == MediaData::Type::VIDEO_DATA);
    const bool isAudio = aType == MediaData::Type::AUDIO_DATA;
    const auto offset =
        isAudio ? AudioQueue().GetOffset() : VideoQueue().GetOffset();
    if (offset == media::TimeUnit::Zero()) {
      return;
    }

    SLOG("Discard %s frames after the time=%" PRId64,
         isAudio ? "audio" : "video", offset.ToMicroseconds());
    if (isAudio) {
      DiscardFramesFromTail(AudioQueue(), [&](int64_t aSampleTime) {
        return aSampleTime > offset.ToMicroseconds();
      });
    } else {
      DiscardFramesFromTail(VideoQueue(), [&](int64_t aSampleTime) {
        return aSampleTime > offset.ToMicroseconds();
      });
    }
  }

  // Generates `aDuration` worth of silent audio and pushes it through
  // HandleAudioDecoded(), filling the gap when the audio track is shorter
  // than the video track. Silence is emitted in packets sized like the last
  // real packet (or 1024 frames) rather than one huge buffer.
  void PushFakeAudioDataIfNeeded(const media::TimeUnit& aDuration) {
    MOZ_ASSERT(Info().HasAudio());

    const auto& audioInfo = Info().mAudio;
    CheckedInt64 frames = aDuration.ToTicksAtRate(audioInfo.mRate);
    if (!frames.isValid() || !audioInfo.mChannels || !audioInfo.mRate) {
      NS_WARNING("Can't create fake audio, invalid frames/channel/rate?");
      return;
    }

    if (!frames.value()) {
      NS_WARNING(nsPrintfCString("Duration (%s) too short, no frame needed",
                                 aDuration.ToString().get())
                     .get());
      return;
    }

    // If we can get the last sample, use its frame. Otherwise, use common 1024.
    int64_t typicalPacketFrameCount = 1024;
    if (RefPtr<AudioData> audio = AudioQueue().PeekBack()) {
      typicalPacketFrameCount = audio->Frames();
    }

    media::TimeUnit totalDuration = TimeUnit::Zero(audioInfo.mRate);
    // Generate fake audio in a smaller size of audio chunk.
    while (frames.value()) {
      int64_t packetFrameCount =
          std::min(frames.value(), typicalPacketFrameCount);
      frames -= packetFrameCount;
      // AlignedAudioBuffer zero-fills, which is exactly the silence we want.
      AlignedAudioBuffer samples(packetFrameCount * audioInfo.mChannels);
      if (!samples) {
        NS_WARNING("Can't create audio buffer, OOM?");
        return;
      }
      // `mDecodedAudioEndTime` is adjusted time, and we want unadjusted time
      // otherwise the time would be adjusted twice when pushing sample into the
      // media queue.
      media::TimeUnit startTime = mMaster->mDecodedAudioEndTime;
      if (AudioQueue().GetOffset() != media::TimeUnit::Zero()) {
        startTime -= AudioQueue().GetOffset();
      }
      RefPtr<AudioData> data(new AudioData(0, startTime, std::move(samples),
                                           audioInfo.mChannels,
                                           audioInfo.mRate));
      SLOG("Created fake audio data (duration=%s, frame-left=%" PRId64 ")",
           data->mDuration.ToString().get(), frames.value());
      totalDuration += data->mDuration;
      HandleAudioDecoded(data);
    }
    SLOG("Pushed fake silence audio data in total duration=%" PRId64 "%s",
         totalDuration.ToMicroseconds(), totalDuration.ToString().get());
  }

  bool HasDecodedLastAudioFrame() const {
    // when we're going to leave looping state and have got EOS before, we
    // should mark audio queue as ended because we have got all data we need.
    return mAudioDataRequest.Exists() || mAudioSeekRequest.Exists() ||
           ShouldDiscardLoopedData(MediaData::Type::AUDIO_DATA) ||
           IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA) ||
           mIsReachingAudioEOS;
  }

  bool HasDecodedLastVideoFrame() const {
    // when we're going to leave looping state and have got EOS before, we
    // should mark video queue as ended because we have got all data we need.
    return mVideoDataRequest.Exists() || mVideoSeekRequest.Exists() ||
           ShouldDiscardLoopedData(MediaData::Type::VIDEO_DATA) ||
           IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA) ||
           mIsReachingVideoEOS;
  }

  bool ShouldStopPrerolling() const override {
    // These checks is used to handle the media queue aren't opened correctly
    // because they've been close before entering the looping state. Therefore,
    // we need to preroll data in order to let new data to reopen the queue
    // automatically. Otherwise, playback can't start successfully.
    bool isWaitingForNewData = false;
    if (mMaster->HasAudio()) {
      isWaitingForNewData |= (mIsReachingAudioEOS && AudioQueue().IsFinished());
    }
    if (mMaster->HasVideo()) {
      isWaitingForNewData |= (mIsReachingVideoEOS && VideoQueue().IsFinished());
    }
    return !isWaitingForNewData && DecodingState::ShouldStopPrerolling();
  }

  bool IsReaderSeeking() const {
    return mAudioSeekRequest.Exists() || mVideoSeekRequest.Exists();
  }

  bool IsWaitingData(MediaData::Type aType) const override {
    if (aType == MediaData::Type::AUDIO_DATA) {
      return mMaster->IsWaitingAudioData() ||
             IsDataWaitingForTimestampAdjustment(MediaData::Type::AUDIO_DATA);
    }
    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::VIDEO_DATA);
    return mMaster->IsWaitingVideoData() ||
           IsDataWaitingForTimestampAdjustment(MediaData::Type::VIDEO_DATA);
  }

  bool IsRequestingDataFromStartPosition(MediaData::Type aType) const {
    MOZ_DIAGNOSTIC_ASSERT(aType == MediaData::Type::AUDIO_DATA ||
                          aType == MediaData::Type::VIDEO_DATA);
    if (aType == MediaData::Type::AUDIO_DATA) {
      return mAudioSeekRequest.Exists() || mAudioDataRequest.Exists();
    }
    return mVideoSeekRequest.Exists() || mVideoDataRequest.Exists();
  }

  bool IsBufferingAllowed() const override {
    return !mIsReachingAudioEOS && !mIsReachingVideoEOS;
  }

  // Whether each track has reached its end while in the looping state
  // (consulted by IsBufferingAllowed() and ShouldStopPrerolling()).
  bool mIsReachingAudioEOS;
  bool mIsReachingVideoEOS;

  /**
   * If the two tracks have different lengths, then when the shorter track
   * ends first, we can't adjust new data from that track while the longer
   * track hasn't ended yet. The adjusted timestamps need to be based on the
   * longer track's last timestamp, because otherwise they would drift and
   * eventually cause a/v desync. Such samples need to be stored so that
   * their timestamps can be adjusted later.
   *
   * The following graph explains the situation in detail.
   * o : decoded data with timestamp adjusted, or needing no adjustment (not
   *     looping yet)
   * x : decoded data without timestamp adjustment.
   * - : decoding is stopped and nothing happens.
   * EOS : the track reaches the end. We now know the offset of the track.
   *
   * Timeline ----------------------------------->
   * Track1 :  o EOS x  -  -  o
   * Track2 :  o  o  o EOS o  o
   *
   * Before track2 reaches EOS, we can't adjust samples from track1 because
   * track2 might have a longer duration than track1. The sample x would be
   * stored in `mDataWaitingTimestampAdjustment` and we would also stop
   * decoding track1.
   *
   * After track2 reaches EOS, we know both tracks' offsets, and the larger
   * one is used for `mOriginalDecodedDuration`. Once that duration has been
   * determined, we no longer need to park samples, because we already know
   * how to adjust their timestamps.
   */

  // At most one parked sample at a time (decoding of the shorter track is
  // stopped while a sample is waiting here).
  RefPtr<MediaData> mDataWaitingTimestampAdjustment;

  // Outstanding seek-back-to-start and first-sample requests per track; see
  // IsReaderSeeking() and IsRequestingDataFromStartPosition().
  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mAudioSeekRequest;
  MozPromiseRequestHolder<MediaFormatReader::SeekPromise> mVideoSeekRequest;
  MozPromiseRequestHolder<AudioDataPromise> mAudioDataRequest;
  MozPromiseRequestHolder<VideoDataPromise> mVideoDataRequest;

  // The media format reader only allows seeking one track at a time; if a
  // seek is already in flight, delay the new seek until the current one
  // finishes.
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=91 H=98 G=94

¤ Dauer der Verarbeitung: 0.15 Sekunden  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.