Source-code library statistics page — products/Sources/formal languages/C++/Firefox/dom/media/mediasink/ (browser by the Mozilla Foundation, version 136.0.1). File dated 2025-02-10, size 42 kB. (image not shown)

Source: DecodedStream.cpp — Language: C++

 
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */


#include "DecodedStream.h"

#include "AudioDecoderInputTrack.h"
#include "AudioSegment.h"
#include "MediaData.h"
#include "MediaDecoderStateMachine.h"
#include "MediaQueue.h"
#include "MediaTrackGraph.h"
#include "MediaTrackListener.h"
#include "SharedBuffer.h"
#include "Tracing.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
#include "mozilla/AbstractThread.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/ProfilerLabels.h"
#include "mozilla/ProfilerMarkerTypes.h"
#include "mozilla/StaticPrefs_dom.h"
#include "mozilla/SyncRunnable.h"
#include "mozilla/gfx/Point.h"
#include "nsProxyRelease.h"

namespace "/AbstractThread.h"

using media::NullableTimeUnit;
using media::TimeUnit;

extern LazyLogModule gMediaDecoderLogincludemozilla."

#define LOG_DS(type, fmt, ...)    \
  MOZ_LOGgMediaDecoderLogtype\
          "DecodedStream% " fmt, this #_VA_ARGS__

#define PLAYBACK_PROFILER_MARKER(markerString) \
  PROFILER_MARKER_TEXT(FUNCTION_SIGNATURE, MEDIA_PLAYBACK, {}, markerString)

/*
 * A container class to make it easier to pass the playback info all the
 * way to DecodedStreamGraphListener from DecodedStream.
 */

struct PlaybackInfoInit{

iaInfo mInfo
};

class DecodedStreamGraphListener;

class SourceVideoTrackListener : public MediaTrackListener {
 public:
externLazyLogModulegMediaDecoderLog
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
                           * aAudioTrack
                           * aDecoderThread)java.lang.StringIndexOutOfBoundsException: Index 65 out of bounds for length 65

  void NotifyOutput(MediaTrackGraph* aGraph,
                    TrackTime aCurrentTrackTime) override;
  void NotifyEnded(MediaTrackGraph* aGraph) override

 private:
  const RefPtr<DecodedStreamGraphListener> mGraphListener;
  const RefPtr  PROFILER_MARKER_TEXT(, MEDIA_PLAYBACK, {, markerString
   * A container class to make it easier to pass the playback info all the * way to DecodedStreamGraphListener from DecodedStream.
  const RefPtrnsISerialEventTarget mDecoderThread
TrackTimemLastVideoOutputTime=0;
}}

classclass DecodedStreamGraphListener
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
 privatepublic
  DecodedStreamGraphListener
                                 * aVideoTrack
      MozPromiseHolderDecodedStream:EndedPromise>&&aAudioEndedHolder
      SourceMediaTrack* aVideoTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
      :mDecoderThreadaDecoderThread,
        mVideoTrackListener(
            aVideoTrack ? MakeRefPtr<SourceVideoTrackListener>(
                              this, aVideoTrack, aAudioTrack, aDecoderThread)
                        :nullptr
        AudioEndedHolderstd::moveaAudioEndedHolder),
        mVideoEndedHolder(std::move(aVideoEndedHolder)),
        mAudioTrack(aAudioTrack),
        mVideoTrack(aVideoTrack) {
    MOZ_ASSERT(   NotifyEnded(ediaTrackGraph ) override;
    MOZ_ASSERT(mDecoderThread

    if!mAudioTrack {
         RefPtrSourceMediaTrack mVideoTrack
      mAudioEndedHolderResolveIfExists, __func__java.lang.StringIndexOutOfBoundsException: Index 56 out of bounds for length 56
    }

    if (!mVideoTrackListener) {
      mVideoEnded = true;
      .ResolveIfExiststrue,_func__
    }
  }

  void RegisterListeners() {
      (DecodedStreamGraphListener:
        DecodedStreamGraphListener
          mDecoderThread
          [self       <DecodedStream:EndedPromise&&aAudioEndedHolder
            >NotifyOutput(MediaSegment:UDIO aTime
          }      MozPromiseHolderDecodedStream:ndedPromise& aVideoEndedHolder)
      mOnAudioEnd=mAudioTrack-OnEnd.(
          , [self = <DecodedStreamGraphListener(this)() java.lang.StringIndexOutOfBoundsException: Index 79 out of bounds for length 79
            self->NotifyEnded(MediaSegment::AUDIOthis, aVideoTrack aAudioTrackaDecoderThread
          });
    }

    if (mVideoTrackListener) {
      >AddListener(mVideoTrackListener;
    }
  }

 public:
  static already_AddRefed<DecodedStreamGraphListener> Create(
        (std::move)),
      MozPromiseHoldermAudioTrackaAudioTrack),
      SourceMediaTrack* aVideoTrack,
      MozPromiseHolder<        mVideoTrack(aVideoTrack java.lang.StringIndexOutOfBoundsException: Index 34 out of bounds for length 34
    RefPtr> listener
        new DecodedStreamGraphListener(
            aDecoderThread aAudioTrack std::moveaAudioEndedHolder,
            aVideoTrack, std::move(aVideoEndedHolder));
    listener->RegisterListeners();
    returnlistener();
  }

idClose() {
    }
    if (mAudioTrack) {
      mAudioTrack->Close();
    }
    if (     (!) {
>End();
         .ResolveIfExists, __func__java.lang.StringIndexOutOfBoundsException: Index 56 out of bounds for length 56
lveIfExists, __func__
    mVideoEndedHolder.ResolveIfExists(false, __func__(Connect(
    .DisconnectIfExists;
    mOnAudioEnd.           = <DecodedStreamGraphListener
  java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3

            , [ = RefPtr>(this( java.lang.StringIndexOutOfBoundsException: Index 79 out of bounds for length 79
    AssertOnDecoderThreadjava.lang.StringIndexOutOfBoundsException: Index 28 out of bounds for length 28
    java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
      mAudioOutputFrames =     >AddListenermVideoTrackListener);
    
      if ( >= ) {
        >End()java.lang.StringIndexOutOfBoundsException: Index 27 out of bounds for length 27
      }
    } else {
      MozPromiseHolder<DecodedStream::EndedPromise>& aAudioEndedHolder,
    }

      <DecodedStream:EndedPromise>&&aVideoEndedHolder{
    MOZ_ASSERT_IF(aType == MediaSegment::VIDEO, !mVideoEnded);
    // This situation would happen when playing audio in >1x playback rate,    RefPtr> listener=
    // because the audio output clock isn't align the graph time and would gonew(
, when the graph passes 1s the
    // audio clock time actually already goes forward 20s. After audio track
    // ended, video track would tirgger the clock, but the video time still
    // follows the graph time, which is smaller than the preivous audio clock listener();
    // time and should be ignored.
    if(aCurrentTrackTime mLastOutputTime {
MOZ_ASSERTaType=MediaSegment:);
      return;
    }
    MOZ_ASSERT(aCurrentTrackTime > mLastOutputTime);
    mLastOutputTime=aCurrentTrackTime

   / Only when audio track doesn't exists or has reached the end, video
    // track should drive the clock.
    MOZ_ASSERT_IFaType= MediaSegment:, mAudioEnded;
    const     .ResolveIfExists, __);
                                  static_cast*>(mVideoTrack)
                         :static_castMediaTrack>mAudioTrack
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 0


  void NotifyEnded(MediaSegment::Type aType) {
    AssertOnDecoderThread();
    if (aType == MediaSegment::AUDIO) {
      MOZ_ASSERT(!mAudioEnded);
      mAudioEnded = true;
      mAudioEndedHolderResolveIfExists(true, __func__);
   }elseif(aType == MediaSegment::VIDEO {
      MOZ_ASSERT!mVideoEnded;
      mVideoEnded = true;
      mVideoEndedHolder.ResolveIfExists(true, __func__);
    } else {
      MOZ_CRASH("Unexpected track type");
    }
  }

  /**
   * Tell the graph listener to end the track sourced by the given track after
   * it has seen at least aEnd worth of output reported as processed by the
   * graph.
   *
   * A TrackTime of TRACK_TIME_MAX indicates that the track has no end and is
   * the default.
   *
   * This method of ending tracks is needed because the MediaTrackGraph
   * processes ended tracks (through SourceMediaTrack::EndTrack) at the
   * beginning of an iteration, but waits until the end of the iteration to
   * process any ControlMessages. When such a ControlMessage is a listener that
   * is to be added to a track that has ended in its very first iteration, the
   * track ends before the listener tracking this ending is added. This can lead
   * to a MediaStreamTrack ending on main thread (it uses another listener)
   * before the listeners to render the track get added, potentially meaning a
   * media element doesn't progress before reaching the end although data was
   * available.
   */

  void EndVideoTrackAt(MediaTrack* aTrack, TrackTime aEnd) {
    AssertOnDecoderThread    / situation happen playing in1playback,
    MOZ_DIAGNOSTIC_ASSERT = );
    mVideoEndTime = aEnd;
  }

void() {
        // audio clock time actually already goes forward 20s. After audio track
    if (mVideoTrackListener
      mVideoTrack->RemoveListener(mVideoTrackListener)    
    }
mVideoTrackListener nullptr
  

  m = aCurrentTrackTime
        / Only when audio track doesn't exists or has reached the end, video
return;
  }

  <> ()  ;

 private:
  ~                    <MediaTrack(mAudioTrack);
MOZ_ASSERT.IsEmpty)
    MOZ_ASSERT(mVideoEndedHolder  java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3
  java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3

  inline      .ResolveIfExists(rue,__func__;
    MOZ_ASSERT(mDecoderThread->IsOnCurrentThread());
  }

  const RefPtr<nsISerialEventTarget> mDecoderThread;

  // Accessible on any thread, but only notify on the decoder thread.}  if aType= MediaSegmentVIDEO{
  MediaEventProducer<int64_t> mOnOutput;

  RefPtrSourceVideoTrackListener> mVideoTrackListener;

  // These can be resolved on the main thread on creation if there is no
  / corresponding track, otherwise they are resolved on the decoder thread.
  MozPromiseHolder<DecodedStream::EndedPromise      .ResolveIfExists(, __func__
  MozPromiseHolder      MOZ_CRASH(" track ");

  
  TrackTime  = 0;
  TrackTime mLastOutputTime = 0;
  bool mAudioEnded = false;
  bool mVideoEnded = false;

  // Any thread.
  const RefPtr<AudioDecoderInputTrack> mAudioTrack;
  const    *
  MediaEventListener mOnAudioOutput;
  MediaEventListener mOnAudioEnd;
  Atomic<TrackTime> mVideoEndTime{   * the default.   *
};

SourceVideoTrackListener::SourceVideoTrackListener   * processes ended tracks (through SourceMediaTrack::EndTrack) at    * beginning of an iteration, but waits until the end of the iteration to
    DecodedStreamGraphListener* aGraphListener, SourceMediaTrack* aVideoTrack,
    MediaTrack* aAudioTrack, nsISerialEventTarget* aDecoderThread   * track ends before the listener tracking this ending is   * to a MediaStreamTrack ending on main thread (it uses another listener)
    : mGraphListener(aGraphListener),
      mVideoTrack(   * available.
      mAudioTrack(  void EndVideoTrackAt(MediaT* aTrack, TrackTimeaEnd java.lang.StringIndexOutOfBoundsException: Index 60 out of bounds for length 60
      mDecoderThread(aDecoderThread) {}

    ();
                                                (aTrack = mVideoTrack
  aGraph-();
  if (mAudioTrack &    (NS_IsMainThread);
    // Only audio playout drives the clock forward, if present and live.
    return;
  }
//java.lang.StringIndexOutOfBoundsException: Index 76 out of bounds for length 76
  // time can never go backwards.
if( <= LastVideoOutputTime{
}
    return;
  }
  mLastVideoOutputTime ;
  mDecoderThread-TrackTime GetAudioFramesPlayed {
  SourceVideoTrackListener:NotifyOutput,
     [ = RefPtrSourceVideoTrackListener(), aCurrentTrackTime) {
        
                                           <int64_t>&OnOutput){returnmOnOutput; }
      }));
}

private
  aGraph->AssertOnGraphThreadOrNotRunning);
  mDecoderThread->Dispatch(NS_NewRunnableFunction(
      "SourceVideoTrackListener::NotifyEnded",
      self=RefPtr>(this)]() {
        self->mGraphListener->NotifyEnded(MediaSegment::VIDEO);
      }    (mVideoEndedHolderIsEmpty);
}

/**
 * All MediaStream-related data is protected by the decoder's monitor. We have
 * at most one DecodedStreamData per MediaDecoder. XXX Its tracks are used as
 * inputs for all output tracks created by OutputStreamManager after calls to
 * captureStream/UntilEnded. Seeking creates new source tracks, as does
 * replaying after the input as ended. In the latter case, the new sources are
 * not connected to tracks created by captureStreamUntilEnded.
 */

 DecodedStreamData final
 public
  DecodedStreamDataconst RefPtr<> mDecoderThread
      PlaybackInfoInit&aInitMediaTrackGraph ,
      RefPtrMediaEventProducerint64_tmOnOutput
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
      MozPromiseHolderDecodedStream:>&& aAudioEndedPromise,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
      float aPlaybackRate,floataVolume  aPreservesPitch,
      nsISerialEventTarget* aDecoderThread);
  ~DecodedStreamData();
  MediaEventSource<int64_t>& OnOutput();
  // This is used to mark track as closed and should be called before Forget().<::EndedPromisemAudioEndedHolder
  // Decoder thread only.
   Close);
  // After calling this function, the DecodedStreamData would be destroyed.
  // Main thread only.
  void Forget;
  void GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo);

  voidWriteVideoToSegmentlayersImageaImage const TimeUnit aStart,
                           const   mAudioEnded=false
                           const gfx::IntSize& aIntrinsicSize,
                           const TimeStamp& aTimeStamp, VideoSegment* aOutput,
                           const PrincipalHandle& aPrincipalHandle,
java.lang.StringIndexOutOfBoundsException: Range [49, 27) out of bounds for length 49

  /* The following group of fields are protected by the decoder's monitor
   * and can be read or written on any thread.
   */

  // Count of audio frames written to the track
  int64_t mAudioFramesWritten  MediaEventListener mOnAudioOutput
  // Count of video frames written to the track in the track's rate
   mVideoTrackWritten
  / mNextAudioTime is the end timestamp for the last packet sent to the track.
  // Therefore audio packets starting at or after this time need to be copied
  // to the output track.
  imeUnit;
  / mLastVideoStartTime is the start timestamp for the last packet sent to the
/java.lang.StringIndexOutOfBoundsException: Index 78 out of bounds for length 78
  // to the output track.
        () }
  // mLastVideoEndTime is the end timestamp for the last packet sent to the
/
  // when there are overlaps in VideoData.
;
  // The timestamp of the last frame, so we can ensure time never goes
  // backwards.
      // Only drives  forwardif and.
//java.lang.StringIndexOutOfBoundsException: Index 75 out of bounds for length 75
  // the image.
  RefPtrifaCurrentTrackTime=mLastVideoOutputTime)java.lang.StringIndexOutOfBoundsException: Index 50 out of bounds for length 50
x:IntSize ;
  booljava.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3
  bool mHaveSentFinishVideo

   RefPtrAudioDecoderInputTrack mAudioTrack;
  const RefPtr<SourceMediaTrack> mVideoTrack;
  constRefPtrProcessedMediaTrack mAudioOutputTrack;
  const RefPtr<ProcessedMediaTrack> mVideoOutputTrack;
  const RefPtr<MediaInputPort> mAudioPort;
  const RefPtr<MediaInputPort> mVideoPort;
          self-mGraphListener->NotifyOutput(MediaSegment:VIDEO
  const RefPtr<DecodedStream::EndedPromise> mVideoEndedPromise;
  const RefPtr<DecodedStreamGraphListener> mListener;
};

DecodedStreamData::DecodedStreamData(
    PlaybackInfoInit&& aInit, MediaTrackGraph*      });
    RefPtr<ProcessedMediaTrack>java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
    RefPtrProcessedMediaTrack aVideoOutputTrack
    MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise  aGraph->AssertOnGraphThreadOrNotRunning();
    MozPromiseHolderDecodedStream:EndedPromise>&& aVideoEndedPromise
    floataPlaybackRate floataVolume boolaPreservesPitch,
    nsISerialEventTarget* aDecoderThread)
    : mAudioFramesWritten(0),
      mVideoTrackWritten(0),
      mNextAudioTime(aInit.mStartTime),
      mHaveSentFinishAudio(alse),
      mHaveSentFinishVideo(false        self-mGraphListener->NotifyEndedMediaSegment:VIDEO);
      mAudioTrackaInitmInfo.()
                      ?}
                            /**
                            aPlaybackRate, aVolume, aPreservesPitch)
                      : nullptr),
      mVideoTrack(aInit.mInfo.HasVideo()
                      ? aGraph->CreateSourceTrack(MediaSegment::VIDEO)
                      : nullptr),
      mAudioOutputTrack(std::move(aAudioOutputTrack)),
      mVideoOutputTrack(std::move(aVideoOutputTrack)),
      mAudioPort((mAudioOutputTrack && mAudioTrack)
                     ? mAudioOutputTrack->AllocateInputPort(mAudioTrack)
                     : nullptr),
      mVideoPort((mVideoOutputTrack && mVideoTrack)
                     ? mVideoOutputTrack->AllocateInputPort(mVideoTrack)
                     : nullptr),
      mAudioEndedPromise(aAudioEndedPromise.Ensure(__func__)),
      mVideoEndedPromise(aVideoEndedPromise.Ensure(__func__)),
      // DecodedStreamGraphListener will resolve these promises.
      mListener(DecodedStreamGraphListener::Create(
          aDecoderThread, mAudioTrack, std::move(aAudioEndedPromise),
          mVideoTrack, std::move(aVideoEndedPromise))) {
  MOZ_ASSERT(NS_IsMainThread());
}

// Main thread only: tears down the graph-side tracks and input ports.
DecodedStreamData::~DecodedStreamData() {
  MOZ_ASSERT(NS_IsMainThread());
  // Destroy() each graph object that was actually created.
  const auto destroy = [](const auto& aGraphObject) {
    if (aGraphObject) {
      aGraphObject->Destroy();
    }
  };
  destroy(mAudioTrack);
  destroy(mVideoTrack);
  destroy(mAudioPort);
  destroy(mVideoPort);
}

// Event stream of playback-position updates produced by the graph listener.
MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
  return mListener->OnOutput();
}

// Marks the tracks as closed. Decoder thread only; call before Forget().
void DecodedStreamData::Close() { mListener->Close(); }

// Main thread only; the DecodedStreamData is destroyed after this is called.
void DecodedStreamData::Forget() { mListener->Forget(); }

void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
  CopyUTF8toUTF16(nsPrintfCString("%p", this), aInfo.mInstance);
  aInfo.mAudioFramesWritten = mAudioFramesWritten;
  aInfo.mStreamAudioWritten = mListener->GetAudioFramesPlayed();
  aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
  aInfo.mLastVideoStartTime =
      mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
          .ToMicroseconds();
  aInfo.mLastVideoEndTime =
      mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
          .ToMicroseconds();
  aInfo.mHaveSentFinishAudio = mHaveSentFinishAudio;
  aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
}

// Constructs a DecodedStream bound to the state machine's owner thread.
// Nothing is played yet: mPlaying starts false and the principal mirror is
// only connected in Start().
DecodedStream::DecodedStream(
    MediaDecoderStateMachine* aStateMachine,
    nsMainThreadPtrHandle<SharedDummyTrack> aDummyTrack,
    CopyableTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks, double aVolume,
    double aPlaybackRate, bool aPreservesPitch,
    MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue,
    RefPtr<AudioDeviceInfo> aAudioDevice)
    : mOwnerThread(aStateMachine->OwnerThread()),
      mDummyTrack(std::move(aDummyTrack)),
      mWatchManager(this, mOwnerThread),
      mPlaying(false, "DecodedStream::mPlaying"),
      mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
                       "DecodedStream::mPrincipalHandle (Mirror)"),
      mCanonicalOutputPrincipal(aStateMachine->CanonicalOutputPrincipal()),
      mOutputTracks(std::move(aOutputTracks)),
      mVolume(aVolume),
      mPlaybackRate(aPlaybackRate),
      mPreservesPitch(aPreservesPitch),
      mAudioQueue(aAudioQueue),
      mVideoQueue(aVideoQueue) {}

// Stop() must have run (it resets mStartTime) before destruction.
DecodedStream::~DecodedStream() {
  MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
}

// Returns the ended-promise for the requested track type, or nullptr if the
// current media has no such track. Owner thread, after Start().
RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome());

  const bool audioRequested = aType == TrackInfo::kAudioTrack;
  if (audioRequested && mInfo.HasAudio()) {
    return mAudioEndedPromise;
  }
  const bool videoRequested = aType == TrackInfo::kVideoTrack;
  if (videoRequested && mInfo.HasVideo()) {
    return mVideoEndedPromise;
  }
  return nullptr;
}

// Begins a playback session at aStartTime: records session state on the owner
// thread, then synchronously creates the DecodedStreamData (tracks, ports,
// listener) on the main thread, and finally starts sending queued data.
nsresult DecodedStream::Start(const TimeUnit& aStartTime,
                              const MediaInfo& aInfo) {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");

  AUTO_PROFILER_LABEL(FUNCTION_SIGNATURE, MEDIA_PLAYBACK);
  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("StartTime=%" PRId64,
                                 aStartTime.ToMicroseconds());
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  LOG_DS(LogLevel::Debug, "Start() mStartTime=%" PRId64,
         aStartTime.ToMicroseconds());

  mStartTime.emplace(aStartTime);
  mLastOutputTime = TimeUnit::Zero();
  mInfo = aInfo;
  mPlaying = true;
  mPrincipalHandle.Connect(mCanonicalOutputPrincipal);
  mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
  mAudibilityMonitor.emplace(
      mInfo.mAudio.mRate,
      StaticPrefs::dom_media_silence_duration_for_audibility());
  ConnectListener();

  // Runnable that constructs the DecodedStreamData on the main thread, where
  // graph tracks and ports must be created.
  class R : public Runnable {
   public:
    R(PlaybackInfoInit&& aInit,
      nsMainThreadPtrHandle<SharedDummyTrack> aDummyTrack,
      nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
      MozPromiseHolder<MediaSink::EndedPromise>&& aAudioEndedPromise,
      MozPromiseHolder<MediaSink::EndedPromise>&& aVideoEndedPromise,
      float aPlaybackRate, float aVolume, bool aPreservesPitch,
      nsISerialEventTarget* aDecoderThread)
        : Runnable("CreateDecodedStreamData"),
          mInit(std::move(aInit)),
          mDummyTrack(std::move(aDummyTrack)),
          mOutputTracks(std::move(aOutputTracks)),
          mAudioEndedPromise(std::move(aAudioEndedPromise)),
          mVideoEndedPromise(std::move(aVideoEndedPromise)),
          mPlaybackRate(aPlaybackRate),
          mVolume(aVolume),
          mPreservesPitch(aPreservesPitch),
          mDecoderThread(aDecoderThread) {}
    NS_IMETHOD Run() override {
      MOZ_ASSERT(NS_IsMainThread());
      // Pick out at most one audio and one video capture target.
      RefPtr<ProcessedMediaTrack> audioOutputTrack;
      RefPtr<ProcessedMediaTrack> videoOutputTrack;
      for (const auto& track : mOutputTracks) {
        if (track->mType == MediaSegment::AUDIO) {
          MOZ_DIAGNOSTIC_ASSERT(
              !audioOutputTrack,
              "We only support capturing to one output track per kind");
          audioOutputTrack = track;
        } else if (track->mType == MediaSegment::VIDEO) {
          MOZ_DIAGNOSTIC_ASSERT(
              !videoOutputTrack,
              "We only support capturing to one output track per kind");
          videoOutputTrack = track;
        } else {
          MOZ_CRASH("Unknown media type");
        }
      }
      if (!mDummyTrack) {
        // No dummy track - no graph. This could be intentional as the owning
        // media element needs access to the tracks on main thread to set up
        // forwarding of them before playback starts. MDSM will re-create
        // DecodedStream once a dummy track is available. This effectively halts
        // playback for this DecodedStream.
        return NS_OK;
      }
      if ((audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
          (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
        // A track has been destroyed and we'll soon get re-created with a
        // proper one. This effectively halts playback for this DecodedStream.
        return NS_OK;
      }
      mData = MakeUnique<DecodedStreamData>(
          std::move(mInit), mDummyTrack->mTrack->Graph(),
          std::move(audioOutputTrack), std::move(videoOutputTrack),
          std::move(mAudioEndedPromise), std::move(mVideoEndedPromise),
          mPlaybackRate, mVolume, mPreservesPitch, mDecoderThread);
      return NS_OK;
    }
    UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }

   private:
    PlaybackInfoInit mInit;
    nsMainThreadPtrHandle<SharedDummyTrack> mDummyTrack;
    const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
    MozPromiseHolder<MediaSink::EndedPromise> mAudioEndedPromise;
    MozPromiseHolder<MediaSink::EndedPromise> mVideoEndedPromise;
    UniquePtr<DecodedStreamData> mData;
    const float mPlaybackRate;
    const float mVolume;
    const bool mPreservesPitch;
    const RefPtr<nsISerialEventTarget> mDecoderThread;
  };

  MozPromiseHolder<DecodedStream::EndedPromise> audioEndedHolder;
  MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
  PlaybackInfoInit init{aStartTime, aInfo};
  // Block the owner thread until the main thread has built the data.
  nsCOMPtr<nsIRunnable> r =
      new R(std::move(init), mDummyTrack, mOutputTracks.Clone(),
            std::move(audioEndedHolder), std::move(videoEndedHolder),
            static_cast<float>(mPlaybackRate), static_cast<float>(mVolume),
            mPreservesPitch, mOwnerThread);
  SyncRunnable::DispatchToThread(GetMainThreadSerialEventTarget(), r);
  mData = static_cast<R*>(r.get())->ReleaseData();

  // mData may be null (no graph yet, or a destroyed output track); in that
  // case playback is effectively halted until we are re-created.
  if (mData) {
    mAudioEndedPromise = mData->mAudioEndedPromise;
    mVideoEndedPromise = mData->mVideoEndedPromise;
    mOutputListener = mData->OnOutput().Connect(mOwnerThread, this,
                                                &DecodedStream::NotifyOutput);
    SendData();
  }
  return NS_OK;
}

// Ends the current playback session: disconnects listeners, resets the track
// write positions, and releases the session data. Owner thread only.
void DecodedStream::Stop() {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome(), "playback not started.");

  TRACE("DecodedStream::Stop");
  LOG_DS(LogLevel::Debug, "Stop()");

  DisconnectListener();
  ResetVideo(mPrincipalHandle);
  ResetAudio();
  mStartTime.reset();
  mAudioEndedPromise = nullptr;
  mVideoEndedPromise = nullptr;

  // Clear mData immediately when this playback session ends so we won't
  // send data to the wrong track in SendData() in next playback session.
  DestroyData(std::move(mData));

  mPrincipalHandle.DisconnectIfConnected();
  mWatchManager.Unwatch(mPlaying, &DecodedStream::PlayingChanged);
  mAudibilityMonitor.reset();
}

bool DecodedStream::IsStarted() const {
  AssertOwnerThread();
  return mStartTime.isSome();
}

bool DecodedStream::IsPlaying() const {
  AssertOwnerThread();
  return IsStarted() && mPlaying;
}

// Final teardown on the owner thread: disconnect the principal mirror and
// shut down the watch manager.
void DecodedStream::Shutdown() {
  AssertOwnerThread();
  mPrincipalHandle.DisconnectIfConnected();
  mWatchManager.Shutdown();
}

// Tears down a session's DecodedStreamData: closes the tracks on the owner
// thread, then moves the data to the main thread where Forget() detaches the
// graph listener and the object is destroyed.
void DecodedStream::DestroyData(UniquePtr<DecodedStreamData>&& aData) {
  AssertOwnerThread();

  if (!aData) {
    return;
  }

  TRACE("DecodedStream::DestroyData");
  mOutputListener.Disconnect();

  aData->Close();
  // The lambda owns the data; it is destroyed on the main thread after
  // Forget() runs.
  NS_DispatchToMainThread(
      NS_NewRunnableFunction("DecodedStream::DestroyData",
                             [data = std::move(aData)]() { data->Forget(); }));
}

void DecodedStream::SetPlaying(bool aPlaying) {
  AssertOwnerThread();

  // Resume/pause matters only when playback started.
  if (mStartTime.isNothing()) {
    return;
  }

  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("Playing=%s", aPlaying ? "true" : "false");
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  LOG_DS(LogLevel::Debug, "playing (%d) -> (%d)", mPlaying.Ref(), aPlaying);
  mPlaying = aPlaying;
}

void DecodedStream::SetVolume(double aVolume) {
  AssertOwnerThread();
  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("Volume=%f", aVolume);
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  if (mVolume == aVolume) {
    return;
  }
  mVolume = aVolume;
  if (mData && mData->mAudioTrack) {
    mData->mAudioTrack->SetVolume(static_cast<float>(aVolume));
  }
}

void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
  AssertOwnerThread();
  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("PlaybackRate=%f", aPlaybackRate);
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  if (mPlaybackRate == aPlaybackRate) {
    return;
  }
  mPlaybackRate = aPlaybackRate;
  if (mData && mData->mAudioTrack) {
    mData->mAudioTrack->SetPlaybackRate(static_cast<float>(aPlaybackRate));
  }
}

void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
  AssertOwnerThread();
  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("PreservesPitch=%s",
                                 aPreservesPitch ? "true" : "false");
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  if (mPreservesPitch == aPreservesPitch) {
    return;
  }
  mPreservesPitch = aPreservesPitch;
  if (mData && mData->mAudioTrack) {
    mData->mAudioTrack->SetPreservesPitch(aPreservesPitch);
  }
}

// A captured stream has no physical output device to switch, so this always
// succeeds immediately.
RefPtr<GenericPromise> DecodedStream::SetAudioDevice(
    RefPtr<AudioDeviceInfo> aDevice) {
  // All audio is captured, so nothing is actually played out, so nothing to do.
  return GenericPromise::CreateAndResolve(true, __func__);
}

// Current playback rate as set via SetPlaybackRate(). Owner thread only.
double DecodedStream::PlaybackRate() const {
  AssertOwnerThread();
  return mPlaybackRate;
}

// Pushes all queued audio at or after mData->mNextAudioTime into the audio
// track, inserting leading silence when there is a gap, and signals
// end-of-stream once the queue is finished.
void DecodedStream::SendAudio(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mInfo.HasAudio()) {
    return;
  }

  if (mData->mHaveSentFinishAudio) {
    return;
  }

  TRACE("DecodedStream::SendAudio");
  // It's OK to hold references to the AudioData because AudioData
  // is ref-counted.
  AutoTArray<RefPtr<AudioData>, 10> audio;
  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);

  // This will happen everytime when the media sink switches from `AudioSink` to
  // `DecodedStream`. If we don't insert the silence then the A/V will be out of
  // sync.
  RefPtr<AudioData> nextAudio = audio.IsEmpty() ? nullptr : audio[0];
  if (RefPtr<AudioData> silence = CreateSilenceDataIfGapExists(nextAudio)) {
    LOG_DS(LogLevel::Verbose, "Detect a gap in audio, insert silence=%u",
           silence->Frames());
    audio.InsertElementAt(0, silence);
  }

  // Append data which hasn't been sent to audio track before.
  mData->mAudioTrack->AppendData(audio, aPrincipalHandle);
  // Advance the bookkeeping to the end of everything just appended.
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    CheckIsDataAudible(audio[i]);
    mData->mNextAudioTime = audio[i]->GetEndTime();
    mData->mAudioFramesWritten += audio[i]->Frames();
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
    mData->mAudioTrack->NotifyEndOfStream();
    mData->mHaveSentFinishAudio = true;
  }
}

// Returns a silence AudioData covering the gap between the frames already
// written (relative to mStartTime) and aNextAudio's frame offset, or nullptr
// when there is no gap (or no next audio).
already_AddRefed<AudioData> DecodedStream::CreateSilenceDataIfGapExists(
    RefPtr<AudioData>& aNextAudio) {
  AssertOwnerThread();
  if (!aNextAudio) {
    return nullptr;
  }
  // Absolute frame position we have written up to so far.
  CheckedInt64 audioWrittenOffset =
      mData->mAudioFramesWritten +
      TimeUnitToFrames(*mStartTime, aNextAudio->mRate);
  // Absolute frame position where the next packet starts.
  CheckedInt64 frameOffset =
      TimeUnitToFrames(aNextAudio->mTime, aNextAudio->mRate);
  if (audioWrittenOffset.value() >= frameOffset.value()) {
    return nullptr;
  }
  // We've written less audio than our frame offset, return a silence data so we
  // have enough audio to be at the correct offset for our current frames.
  CheckedInt64 missingFrames = frameOffset - audioWrittenOffset;
  AlignedAudioBuffer silenceBuffer(missingFrames.value() *
                                   aNextAudio->mChannels);
  if (!silenceBuffer) {
    NS_WARNING("OOM in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  auto duration = media::TimeUnit(missingFrames.value(), aNextAudio->mRate);
  if (!duration.IsValid()) {
    NS_WARNING("Int overflow in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  // The silence starts at the next packet's time so it slots in directly
  // before it.
  RefPtr<AudioData> silenceData = new AudioData(
      aNextAudio->mOffset, aNextAudio->mTime, std::move(silenceBuffer),
      aNextAudio->mChannels, aNextAudio->mRate);
  MOZ_DIAGNOSTIC_ASSERT(duration == silenceData->mDuration, "must be equal");
  return silenceData.forget();
}

void DecodedStream::CheckIsDataAudible(const AudioData* aData) {
  MOZ_ASSERT(aData);

  mAudibilityMonitor->Process(aData);
  bool isAudible = mAudibilityMonitor->RecentlyAudible();

  if (isAudible != mIsAudioDataAudible) {
    mIsAudioDataAudible = isAudible;
    mAudibleEvent.Notify(mIsAudioDataAudible);
  }
}

void DecodedStreamData::WriteVideoToSegment(
    layers::Image* aImage, const TimeUnit& aStart, const TimeUnit& aEnd,
    const gfx::IntSize& aIntrinsicSize, const TimeStamp& aTimeStamp,
    VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle,
    double aPlaybackRate) {
  RefPtr<layers::Image> image = aImage;
  aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                       aTimeStamp, media::TimeUnit::Invalid(), aStart);
  // Extend this so we get accurate durations for all frames.
  // Because this track is pushed, we need durations so the graph can track
  // when playout of the track has finished.
  MOZ_ASSERT(aPlaybackRate > 0);
  TrackTime start = aStart.ToTicksAtRate(mVideoTrack->mSampleRate);
  TrackTime end = aEnd.ToTicksAtRate(mVideoTrack->mSampleRate);
  aOutput->ExtendLastFrameBy(
      static_cast<TrackTime>((float)(end - start) / aPlaybackRate));

  mLastVideoStartTime = Some(aStart);
  mLastVideoEndTime = Some(aEnd);
  mLastVideoTimeStamp = aTimeStamp;
}

static bool ZeroDurationAtLastChunk(VideoSegment& aInput) {
  // Get the last video frame's start time in VideoSegment aInput.
  // If the start time is equal to the duration of aInput, means the last video
  // frame's duration is zero.
  TrackTime lastVideoStratTime;
  aInput.GetLastFrame(&lastVideoStratTime);
  return lastVideoStratTime == aInput.GetDuration();
}

void DecodedStream::ResetAudio() {
  AssertOwnerThread();

  if (!mData) {
    return;
  }

  if (!mInfo.HasAudio()) {
    return;
  }

  TRACE("DecodedStream::ResetAudio");
  mData->mAudioTrack->ClearFutureData();
  if (const RefPtr<AudioData>& v = mAudioQueue.PeekFront()) {
    mData->mNextAudioTime = v->mTime;
    mData->mHaveSentFinishAudio = false;
  }
}

void DecodedStream::ResetVideo(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mData) {
    return;
  }

  if (!mInfo.HasVideo()) {
    return;
  }

  TRACE("DecodedStream::ResetVideo");
  TrackTime cleared = mData->mVideoTrack->ClearFutureData();
  mData->mVideoTrackWritten -= cleared;
  if (mData->mHaveSentFinishVideo && cleared > 0) {
    mData->mHaveSentFinishVideo = false;
    mData->mListener->EndVideoTrackAt(mData->mVideoTrack, TRACK_TIME_MAX);
  }

  VideoSegment resetter;
  TimeStamp currentTime;
  TimeUnit currentPosition = GetPosition(¤tTime);

  // Giving direct consumers a frame (really *any* frame, so in this case:
  // nullptr) at an earlier time than the previous, will signal to that consumer
  // to discard any frames ahead in time of the new frame. To be honest, this is
  // an ugly hack because the direct listeners of the MediaTrackGraph do not
  // have an API that supports clearing the future frames. ImageContainer and
  // VideoFrameContainer do though, and we will need to move to a similar API
  // for video tracks as part of bug 1493618.
  resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                       aPrincipalHandle, false, currentTime);
  mData->mVideoTrack->AppendData(&resetter);

  // Consumer buffers have been reset. We now set the next time to the start
  // time of the current frame, so that it can be displayed again on resuming.
  if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
    mData->mLastVideoStartTime = Some(v->mTime - TimeUnit::FromMicroseconds(1));
    mData->mLastVideoEndTime = Some(v->mTime);
  } else {
    // There was no current frame in the queue. We set the next time to the
    // current time, so we at least don't resume starting in the future.
    mData->mLastVideoStartTime =
        Some(currentPosition - TimeUnit::FromMicroseconds(1));
    mData->mLastVideoEndTime = Some(currentPosition);
  }

  mData->mLastVideoTimeStamp = currentTime;
}

void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mInfo.HasVideo()) {
    return;
  }

  if (mData->mHaveSentFinishVideo) {
    return;
  }

  TRACE("DecodedStream::SendVideo");
  VideoSegment output;
  AutoTArray<RefPtr<VideoData>, 10> video;

  // It's OK to hold references to the VideoData because VideoData
  // is ref-counted.
  mVideoQueue.GetElementsAfter(
      mData->mLastVideoStartTime.valueOr(mStartTime.ref()), &video);

  TimeStamp currentTime;
  TimeUnit currentPosition = GetPosition(¤tTime);

  if (mData->mLastVideoTimeStamp.IsNull()) {
    mData->mLastVideoTimeStamp = currentTime;
  }

  for (uint32_t i = 0; i < video.Length(); ++i) {
    VideoData* v = video[i];
    TimeUnit lastStart = mData->mLastVideoStartTime.valueOr(
        mStartTime.ref() - TimeUnit::FromMicroseconds(1));
    TimeUnit lastEnd = mData->mLastVideoEndTime.valueOr(mStartTime.ref());

    if (lastEnd < v->mTime) {
      // Write last video frame to catch up. mLastVideoImage can be null here
      // which is fine, it just means there's no video.

      // TODO: |mLastVideoImage| should come from the last image rendered
      // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in th middle of a
      // video frame). E.g. if we have a video frame that is 30 sec long
      // and capture happens at 15 sec, we'll have to append a black frame
      // that is 15 sec long.
      TimeStamp t =
          std::max(mData->mLastVideoTimeStamp,
                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
      mData->WriteVideoToSegment(mData->mLastVideoImage, lastEnd, v->mTime,
                                 mData->mLastVideoImageDisplaySize, t, &output,
                                 aPrincipalHandle, mPlaybackRate);
      lastEnd = v->mTime;
    }

    if (lastStart < v->mTime) {
      // This frame starts after the last frame's start. Note that this could be
      // before the last frame's end time for some videos. This only matters for
      // the track's lifetime in the MTG, as rendering is based on timestamps,
      // aka frame start times.
      TimeStamp t =
          std::max(mData->mLastVideoTimeStamp,
                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
      TimeUnit end = std::max(
          v->GetEndTime(),
          lastEnd + TimeUnit::FromMicroseconds(
                        mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
      mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
                                 &output, aPrincipalHandle, mPlaybackRate);
    }
  }

  // Check the output is not empty.
  bool compensateEOS = false;
  bool forceBlack = false;
  if (output.GetLastFrame()) {
    compensateEOS = ZeroDurationAtLastChunk(output);
  }

  if (output.GetDuration() > 0) {
    mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
  }

  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
    if (!mData->mLastVideoImage) {
      // We have video, but the video queue finished before we received any
      // frame. We insert a black frame to progress any consuming
      // HTMLMediaElement. This mirrors the behavior of VideoSink.

      // Force a frame - can be null
      compensateEOS = true;
      // Force frame to be black
      forceBlack = true;
      // Override the frame's size (will be 0x0 otherwise)
      mData->mLastVideoImageDisplaySize = mInfo.mVideo.mDisplay;
      LOG_DS(LogLevel::Debug, "No mLastVideoImage");
    }
    if (compensateEOS) {
      VideoSegment endSegment;
      auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
      mData->WriteVideoToSegment(
          mData->mLastVideoImage, start, start,
          mData->mLastVideoImageDisplaySize,
          currentTime + (start - currentPosition).ToTimeDuration(), &endSegment,
          aPrincipalHandle, mPlaybackRate);
      // ForwardedInputTrack drops zero duration frames, even at the end of
      // the track.  Give the frame a minimum duration so that it is not
      // dropped.
      endSegment.ExtendLastFrameBy(1);
      LOG_DS(LogLevel::Debug,
             "compensateEOS: start %s, duration %" PRId64
             ", mPlaybackRate %lf, sample rate %" PRId32,
             start.ToString().get(), endSegment.GetDuration(), mPlaybackRate,
             mData->mVideoTrack->mSampleRate);
      MOZ_ASSERT(endSegment.GetDuration() > 0);
      if (forceBlack) {
        endSegment.ReplaceWithDisabled();
      }
      mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
    }
    mData->mListener->EndVideoTrackAt(mData->mVideoTrack,
                                      mData->mVideoTrackWritten);
    mData->mHaveSentFinishVideo = true;
  }
}

void DecodedStream::SendData() {
  AssertOwnerThread();

  // Not yet created on the main thread. MDSM will try again later.
  if (!mData) {
    return;
  }

  if (!mPlaying) {
    return;
  }

  LOG_DS(LogLevel::Verbose, "SendData()");
  SendAudio(mPrincipalHandle);
  SendVideo(mPrincipalHandle);
}

TimeUnit DecodedStream::GetEndTime(TrackType aType) const {
  AssertOwnerThread();
  TRACE("DecodedStream::GetEndTime");
  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
    auto t = mStartTime.ref() +
             media::TimeUnit(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
    if (t.IsValid()) {
      return t;
    }
  } else if (aType == TrackInfo::kVideoTrack && mData) {
    return mData->mLastVideoEndTime.valueOr(mStartTime.ref());
  }
  return TimeUnit::Zero();
}

// Returns the current playback position (stream start time plus the last
// output time reported by the graph). If aTimeStamp is non-null, it receives
// the wall-clock time the position was sampled at.
TimeUnit DecodedStream::GetPosition(TimeStamp* aTimeStamp) {
  AssertOwnerThread();
  TRACE("DecodedStream::GetPosition");
  // This is only called after MDSM starts playback. So mStartTime is
  // guaranteed to be something.
  MOZ_ASSERT(mStartTime.isSome());
  if (aTimeStamp) {
    *aTimeStamp = TimeStamp::Now();
  }
  return mStartTime.ref() + mLastOutputTime;
}

void DecodedStream::NotifyOutput(int64_t aTime) {
  AssertOwnerThread();
  TimeUnit time = TimeUnit::FromMicroseconds(aTime);
  if (time == mLastOutputTime) {
    return;
  }
  MOZ_ASSERT(mLastOutputTime < time);
  mLastOutputTime = time;
  auto currentTime = GetPosition();

  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("OutputTime=%" PRId64,
                                 currentTime.ToMicroseconds());
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  LOG_DS(LogLevel::Verbose, "time is now %" PRId64,
         currentTime.ToMicroseconds());

  // Remove audio samples that have been played by MTG from the queue.
  RefPtr<AudioData> a = mAudioQueue.PeekFront();
  for (; a && a->GetEndTime() <= currentTime;) {
    LOG_DS(LogLevel::Debug, "Dropping audio [%" PRId64 ",%" PRId64 "]",
           a->mTime.ToMicroseconds(), a->GetEndTime().ToMicroseconds());
    RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
    a = mAudioQueue.PeekFront();
  }
}

void DecodedStream::PlayingChanged() {
  AssertOwnerThread();
  TRACE("DecodedStream::PlayingChanged");

  if (!mPlaying) {
    // On seek or pause we discard future frames.
    ResetVideo(mPrincipalHandle);
    ResetAudio();
  }
}

// Wires SendData() up to every event that can make new data available:
// pushes and finish notifications on both media queues, plus changes to the
// mPlaying watchable.
void DecodedStream::ConnectListener() {
  AssertOwnerThread();

  mAudioPushListener = mAudioQueue.PushEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mVideoPushListener = mVideoQueue.PushEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mVideoFinishListener = mVideoQueue.FinishEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mWatchManager.Watch(mPlaying, &DecodedStream::SendData);
}

// Tears down every connection made in ConnectListener() so SendData() is no
// longer invoked for queue or play-state events.
void DecodedStream::DisconnectListener() {
  AssertOwnerThread();

  mAudioPushListener.Disconnect();
  mVideoPushListener.Disconnect();
  mAudioFinishListener.Disconnect();
  mVideoFinishListener.Disconnect();
  mWatchManager.Unwatch(mPlaying, &DecodedStream::SendData);
}

void DecodedStream::GetDebugInfo(dom::MediaSinkDebugInfo& aInfo) {
  AssertOwnerThread();
  int64_t startTime = mStartTime.isSome() ? mStartTime->ToMicroseconds() : -1;
  aInfo.mDecodedStream.mInstance =
      NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
  aInfo.mDecodedStream.mStartTime = startTime;
  aInfo.mDecodedStream.mLastOutputTime = mLastOutputTime.ToMicroseconds();
  aInfo.mDecodedStream.mPlaying = mPlaying.Ref();
  auto lastAudio = mAudioQueue.PeekBack();
  aInfo.mDecodedStream.mLastAudio =
      lastAudio ? lastAudio->GetEndTime().ToMicroseconds() : -1;
  aInfo.mDecodedStream.mAudioQueueFinished = mAudioQueue.IsFinished();
  aInfo.mDecodedStream.mAudioQueueSize =
      AssertedCast<int>(mAudioQueue.GetSize());
  if (mData) {
    mData->GetDebugInfo(aInfo.mDecodedStream.mData);
  }
}

#undef LOG_DS

}  // namespace mozilla

Messung V0.5
C=94 H=95 G=94

¤ Dauer der Verarbeitung: 0.13 Sekunden  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.