Source: DecodedStream.cpp   Language: C++

 
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */


#include "DecodedStream.h"

#include "AudioDecoderInputTrack.h"
#include "AudioSegment.h"
#include "MediaData.h"
#include "MediaDecoderStateMachine.h"
#include "MediaQueue.h"
#include "MediaTrackGraph.h"
#include "MediaTrackListener.h"
#include "SharedBuffer.h"
#include "Tracing.h"
#include "VideoSegment.h"
"ideoUtils.hjava.lang.StringIndexOutOfBoundsException: Range [23, 24) out of bounds for length 23
#include mozilla
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
"/ProfilerLabelsh
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
#include "MOZ_LOG(, , \(=p"fmt,#_))
#includejava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
#include "mozilla/StaticPrefs_dom.h"
#include "nsProxyRelease.h"

namespace mozilla {

using media::NullableTimeUnit;
using media::TimeUnit;

extern LazyLogModule gMediaDecoderLog;

#define LOG_DS(type, fmt, ...)    \
  MOZ_LOG(gMediaDecoderLog, type, \
          ("DecodedStream=%p " fmt, this, ##__VA_ARGS__))

#define PLAYBACK_PROFILER_MARKER(markerString) \
  PROFILER_MARKER_TEXT(FUNCTION_SIGNATURE, MEDIA_PLAYBACK, {}, markerString)

/*
 * A container class to make it easier to pass the playback info all the
 * way to DecodedStreamGraphListener from DecodedStream.
 */

struct PlaybackInfoInit {
  TimeUnit mStartTime;
  MediaInfo mInfo;
};

class DecodedStreamGraphListener;

class SourceVideoTrackListener : public MediaTrackListener {
 public:
  SourceVideoTrackListener(DecodedStreamGraphListener* aGraphListener,
                           SourceMediaTrack* aVideoTrack,
                           MediaTrack* aAudioTrack,
                           nsISerialEventTarget* aDecoderThread);

  void NotifyOutput(MediaTrackGraph* aGraph,
                    TrackTime aCurrentTrackTime) override;
  void NotifyEnded(MediaTrackGraph* aGraph) override;

 private:
  const RefPtr<DecodedStreamGraphListener> mGraphListener;
  const RefPtr<SourceMediaTrack> mVideoTrack;
  const RefPtr<const MediaTrack> mAudioTrack;
  const RefPtr<nsISerialEventTarget> mDecoderThread;
  TrackTime mLastVideoOutputTime = 0;
};

class DecodedStreamGraphListener {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
 private:
  DecodedStreamGraphListener(
      nsISerialEventTarget* aDecoderThread,
      AudioDecoderInputTrack* aAudioTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
      SourceMediaTrack* aVideoTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
      : mDecoderThread(aDecoderThread),
        mVideoTrackListener(
            aVideoTrack ? MakeRefPtr<SourceVideoTrackListener>(
                              this, aVideoTrack, aAudioTrack, aDecoderThread)
                        : nullptr),
        mAudioEndedHolder(std::move(aAudioEndedHolder)),
        mVideoEndedHolder(std::move(aVideoEndedHolder)),
        mAudioTrack(aAudioTrack),
        mVideoTrack(aVideoTrack) {
    MOZ_ASSERT(NS_IsMainThread());
    MOZ_ASSERT(mDecoderThread);

    if (!mAudioTrack) {
      mAudioEnded = true;
      mAudioEndedHolder.ResolveIfExists(true, __func__);
    }
    if (!mVideoTrackListener) {
      mVideoEnded = true;
      mVideoEndedHolder.ResolveIfExists(true, __func__);
    }
  }

  void RegisterListeners() {
    if (mAudioTrack) {
      mOnAudioOutput = mAudioTrack->OnOutput().Connect(
          mDecoderThread,
          [self = RefPtr<DecodedStreamGraphListener>(this)](TrackTime aTime) {
            self->NotifyOutput(MediaSegment::AUDIO, aTime);
          });
      mOnAudioEnd = mAudioTrack->OnEnd().Connect(
          mDecoderThread, [self = RefPtr<DecodedStreamGraphListener>(this)]() {
            self->NotifyEnded(MediaSegment::AUDIO);
          });
    }

    if (mVideoTrackListener) {
      mVideoTrack->AddListener(mVideoTrackListener);
    }
  }

 public:
  static already_AddRefed<DecodedStreamGraphListener> Create(
      nsISerialEventTarget* aDecoderThread,
      AudioDecoderInputTrack* aAudioTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
      SourceMediaTrack* aVideoTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder) {
    RefPtr<DecodedStreamGraphListener> listener =
        new DecodedStreamGraphListener(
            aDecoderThread, aAudioTrack, std::move(aAudioEndedHolder),
            aVideoTrack, std::move(aVideoEndedHolder));
    listener->RegisterListeners();
    return listener.forget();
  }

  void Close() {
    AssertOnDecoderThread();
    if (mAudioTrack) {
      mAudioTrack->Close();
    }
    if (mVideoTrack) {
      mVideoTrack->End();
    }
    mAudioEndedHolder.ResolveIfExists(false, __func__);
    mVideoEndedHolder.ResolveIfExists(false, __func__);
    mOnAudioOutput.DisconnectIfExists();
    mOnAudioEnd.DisconnectIfExists();
  }

  void NotifyOutput(MediaSegment::Type aType, TrackTime aCurrentTrackTime) {
    AssertOnDecoderThread();
    if (aType == MediaSegment::AUDIO) {
      mAudioOutputFrames = aCurrentTrackTime;
    } else if (aType == MediaSegment::VIDEO) {
      if (aCurrentTrackTime >= mVideoEndTime) {
        mVideoTrack->End();
      }
    } else {
      MOZ_CRASH("Unexpected track type");
    }

    MOZ_ASSERT_IF(aType == MediaSegment::AUDIO, !mAudioEnded);
    MOZ_ASSERT_IF(aType == MediaSegment::VIDEO, !mVideoEnded);

    if (aType == MediaSegment::VIDEO && !mAudioEnded) {
      // Don't let video drive the clock while audio is still present. This
      // would happen when playing audio at a >1x rate, because the audio
      // output clock isn't aligned with the graph time and would go forward
      // faster. Eg. at playback rate=2, when the graph time passes 10s the
      // audio clock has already advanced 20s. After audio has ended, the
      // video track would trigger the clock, but the video time still
      // follows the graph time, which is smaller than the previous audio
      // clock time and should be ignored.
      return;
    }

    if (aCurrentTrackTime <= mLastOutputTime) {
      return;
    }
    MOZ_ASSERT(mLastOutputTime < aCurrentTrackTime);
    mLastOutputTime = aCurrentTrackTime;

    // Only when the audio track doesn't exist or has ended should the video
    // track drive the clock.
    MOZ_ASSERT_IF(aType == MediaSegment::VIDEO, mAudioEnded);
    MediaTrack* track = aType == MediaSegment::VIDEO
                            ? static_cast<MediaTrack*>(mVideoTrack.get())
                            : static_cast<MediaTrack*>(mAudioTrack.get());
    mOnOutput.Notify(track->TrackTimeToMicroseconds(aCurrentTrackTime));
  }

  void NotifyEnded(MediaSegment::Type aType) {
    AssertOnDecoderThread();
    if (aType == MediaSegment::AUDIO) {
      MOZ_ASSERT(!mAudioEnded);
      mAudioEnded = true;
      mAudioEndedHolder.ResolveIfExists(true, __func__);
    } else if (aType == MediaSegment::VIDEO) {
      MOZ_ASSERT(!mVideoEnded);
      mVideoEnded = true;
      mVideoEndedHolder.ResolveIfExists(true, __func__);
    } else {
      MOZ_CRASH("Unexpected track type");
    }
  }

  TrackTime GetAudioFramesPlayed() {
    AssertOnDecoderThread();
    return mAudioOutputFrames;
  }

  /**
   * Tell the graph listener to end the track sourced by the given track after
   * it has seen at least aEnd worth of output reported as processed by the
   * graph.
   *
   * A TrackTime of TRACK_TIME_MAX indicates that the track has no end and is
   * the default.
   *
   * This method of ending tracks is needed because the MediaTrackGraph
   * processes ended tracks (through SourceMediaTrack::EndTrack) at the
   * beginning of an iteration, but waits until the end of the iteration to
   * process any ControlMessages. When such a ControlMessage is a listener that
   * is to be added to a track that has ended in its very first iteration, the
   * track ends before the listener tracking this ending is added. This can lead
   * to a MediaStreamTrack ending on main thread (it uses another listener)
   * before the listeners to render the track get added, potentially meaning a
   * media element doesn't progress before reaching the end although data was
   * available.
   */

  void EndVideoTrackAt(MediaTrack* aTrack, TrackTime aEnd) {
    AssertOnDecoderThread();
    MOZ_DIAGNOSTIC_ASSERT(aTrack == mVideoTrack);
    mVideoEndTime = aEnd;
  }
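
  // For illustration, both call sites appear later in this file: SendVideo()
  // ends the track at the written duration once the video queue finishes,
  //   EndVideoTrackAt(mVideoTrack, mVideoTrackWritten);
  // while ResetVideo() re-opens it by passing the no-end sentinel,
  //   EndVideoTrackAt(mVideoTrack, TRACK_TIME_MAX);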

  void Forget() {
    MOZ_ASSERT(NS_IsMainThread());
    if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
      mVideoTrack->RemoveListener(mVideoTrackListener);
    }
    mVideoTrackListener = nullptr;
  }

  MediaEventSource<int64_t>& OnOutput() { return mOnOutput; }

 private:
  ~DecodedStreamGraphListener() {
    MOZ_ASSERT(mAudioEndedHolder.IsEmpty());
    MOZ_ASSERT(mVideoEndedHolder.IsEmpty());
  }

  void AssertOnDecoderThread() const {
    MOZ_ASSERT(mDecoderThread->IsOnCurrentThread());
  }

  const RefPtr<nsISerialEventTarget> mDecoderThread;

  MediaEventProducer<int64_t> mOnOutput;

  RefPtr<SourceVideoTrackListener> mVideoTrackListener;

  // These can be resolved on the main thread on creation if there is no
  // corresponding track, otherwise they are resolved on the decoder thread.
  MozPromiseHolder<DecodedStream::EndedPromise> mAudioEndedHolder;
  MozPromiseHolder<DecodedStream::EndedPromise> mVideoEndedHolder;

  // Decoder thread only.
  TrackTime mLastOutputTime = 0;
  bool mAudioEnded = false;
  bool mVideoEnded = false;
  TrackTime mAudioOutputFrames = 0;

  const RefPtr<AudioDecoderInputTrack> mAudioTrack;
  const RefPtr<SourceMediaTrack> mVideoTrack;
  MediaEventListener mOnAudioOutput;
  MediaEventListener mOnAudioEnd;
  Atomic<TrackTime> mVideoEndTime{TRACK_TIME_MAX};
};

SourceVideoTrackListener::SourceVideoTrackListener(
    DecodedStreamGraphListener* aGraphListener, SourceMediaTrack* aVideoTrack,
    MediaTrack* aAudioTrack, nsISerialEventTarget* aDecoderThread)
    : mGraphListener(aGraphListener),
      mVideoTrack(aVideoTrack),
      mAudioTrack(aAudioTrack),
      mDecoderThread(aDecoderThread) {}

void SourceVideoTrackListener::NotifyOutput(MediaTrackGraph* aGraph,
                                            TrackTime aCurrentTrackTime) {
  aGraph->AssertOnGraphThreadOrNotRunning();
  if (mAudioTrack && !mAudioTrack->Ended()) {
    // Only audio playout drives the clock while audio is present and live.
    return;
  }
  // The graph can iterate without time advancing, but the invariant is that
  // time can never go backwards.
  if (aCurrentTrackTime <= mLastVideoOutputTime) {
    MOZ_ASSERT(aCurrentTrackTime == mLastVideoOutputTime);
    return;
  }
  mLastVideoOutputTime = aCurrentTrackTime;
  mDecoderThread->Dispatch(NS_NewRunnableFunction(
      "SourceVideoTrackListener::NotifyOutput",
      [self = RefPtr<SourceVideoTrackListener>(this), aCurrentTrackTime]() {
        self->mGraphListener->NotifyOutput(MediaSegment::VIDEO,
                                           aCurrentTrackTime);
      }));
}

void SourceVideoTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
  aGraph->AssertOnGraphThreadOrNotRunning();
  mDecoderThread->Dispatch(NS_NewRunnableFunction(
      "SourceVideoTrackListener::NotifyEnded",
      [self = RefPtr<SourceVideoTrackListener>(this)]() {
        self->mGraphListener->NotifyEnded(MediaSegment::VIDEO);
      }));
}



/*
 * All MediaTrack-related data is protected by the decoder's monitor. We have
 * at most one DecodedStreamData per MediaDecoder. Its tracks are used as
 * inputs for all output tracks created by OutputStreamManager after calls to
 * captureStream/UntilEnded. Seeking creates new source tracks, as does
 * replaying after the input has ended. In the latter case, the new sources
 * are not connected to tracks created by captureStream/UntilEnded.
 */
class DecodedStreamData final {
 public:
  DecodedStreamData(
      PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
      RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
      RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
      float aPlaybackRate, float aVolume, bool aPreservesPitch,
      nsISerialEventTarget* aDecoderThread);
  ~DecodedStreamData();
  MediaEventSource<int64_t>& OnOutput();
  // This is used to mark the track as closed and should be called before
  // Forget(). Decoder thread only.
  void Close();
  // Main thread only.
  void Forget();
  void GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo);

  void WriteVideoToSegment(layers::Image* aImage, const TimeUnit& aStart,
                           const TimeUnit& aEnd,
                           const gfx::IntSize& aIntrinsicSize,
                           const TimeStamp& aTimeStamp, VideoSegment* aOutput,
                           const PrincipalHandle& aPrincipalHandle,
                           double aPlaybackRate);

  /* The following group of fields are protected by the decoder's monitor
   * and can be read or written on any thread.
   */
  // Count of audio frames written to the track.
  int64_t mAudioFramesWritten;
  // Count of video frames written to the track in the track's rate.
  TrackTime mVideoTrackWritten;
  // mNextAudioTime is the end timestamp for the last packet sent to the track.
  // Therefore audio packets starting at or after this time need to be copied
  // to the output track.
  TimeUnit mNextAudioTime;
  // mLastVideoStartTime is the start timestamp for the last packet sent to the
  // track. Therefore video packets starting after this time need to be copied
  // to the output track.
  NullableTimeUnit mLastVideoStartTime;
  // mLastVideoEndTime is the end timestamp for the last packet sent to the
  // track. It is used to adjust durations of chunks sent to the output track.
  NullableTimeUnit mLastVideoEndTime;
  // The timestamp of the last frame, so we can ensure time never goes
  // backwards.
  TimeStamp mLastVideoTimeStamp;
  // The last video image sent to the track. Useful if we need to replicate
  // the image.
  RefPtr<layers::Image> mLastVideoImage;
  gfx::IntSize mLastVideoImageDisplaySize;
  bool mHaveSentFinishAudio;
  bool mHaveSentFinishVideo;

  const RefPtr<AudioDecoderInputTrack> mAudioTrack;
  const RefPtr<SourceMediaTrack> mVideoTrack;
  const RefPtr<ProcessedMediaTrack> mAudioOutputTrack;
  const RefPtr<ProcessedMediaTrack> mVideoOutputTrack;
  const RefPtr<MediaInputPort> mAudioPort;
  const RefPtr<MediaInputPort> mVideoPort;
  const RefPtr<DecodedStream::EndedPromise> mAudioEndedPromise;
  const RefPtr<DecodedStream::EndedPromise> mVideoEndedPromise;
  const RefPtr<DecodedStreamGraphListener> mListener;
};

DecodedStreamData::DecodedStreamData(
    PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
    RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
    RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
    MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
    MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
    float aPlaybackRate, float aVolume, bool aPreservesPitch,
    nsISerialEventTarget* aDecoderThread)
    : mAudioFramesWritten(0),
      mVideoTrackWritten(0),
      mNextAudioTime(aInit.mStartTime),
      mHaveSentFinishAudio(false),
      mHaveSentFinishVideo(false),
      mAudioTrack(aInit.mInfo.HasAudio()
                      ? AudioDecoderInputTrack::Create(
                            aGraph, aDecoderThread, aInit.mInfo.mAudio,
                            aPlaybackRate, aVolume, aPreservesPitch)
                      : nullptr),
      mVideoTrack(aInit.mInfo.HasVideo()
                      ? aGraph->CreateSourceTrack(MediaSegment::VIDEO)
                      : nullptr),
      mAudioOutputTrack(std::move(aAudioOutputTrack)),
      mVideoOutputTrack(std::move(aVideoOutputTrack)),
      mAudioPort((mAudioOutputTrack && mAudioTrack)
                     ? mAudioOutputTrack->AllocateInputPort(mAudioTrack)
                     : nullptr),
      mVideoPort((mVideoOutputTrack && mVideoTrack)
                     ? mVideoOutputTrack->AllocateInputPort(mVideoTrack)
                     : nullptr),
      mAudioEndedPromise(aAudioEndedPromise.Ensure(__func__)),
      mVideoEndedPromise(aVideoEndedPromise.Ensure(__func__)),
      // DecodedStreamGraphListener will resolve these promises.
      mListener(DecodedStreamGraphListener::Create(
          aDecoderThread, mAudioTrack, std::move(aAudioEndedPromise),
          mVideoTrack, std::move(aVideoEndedPromise))) {
  MOZ_ASSERT(NS_IsMainThread());
}

DecodedStreamData::~DecodedStreamData() {
  MOZ_ASSERT(NS_IsMainThread());
  if (mAudioTrack) {
    mAudioTrack->Destroy();
  }
  if (mVideoTrack) {
    mVideoTrack->Destroy();
  }
  if (mAudioPort) {
    mAudioPort->Destroy();
  }
  if (mVideoPort) {
    mVideoPort->Destroy();
  }
}

MediaEventSource<int64_t>& DecodedStreamData::OnOutput() {
  return mListener->OnOutput();
}

void DecodedStreamData::Close() { mListener->Close(); }

void DecodedStreamData::Forget() { mListener->Forget(); }

void DecodedStreamData::GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo) {
  CopyUTF8toUTF16(nsPrintfCString("%p", this), aInfo.mInstance);
  aInfo.mAudioFramesWritten = mAudioFramesWritten;
  aInfo.mStreamAudioWritten = mListener->GetAudioFramesPlayed();
  aInfo.mNextAudioTime = mNextAudioTime.ToMicroseconds();
  aInfo.mLastVideoStartTime =
      mLastVideoStartTime.valueOr(TimeUnit::FromMicroseconds(-1))
          .ToMicroseconds();
  aInfo.mLastVideoEndTime =
      mLastVideoEndTime.valueOr(TimeUnit::FromMicroseconds(-1))
          .ToMicroseconds();
  aInfo.mHaveSentFinishAudio = mHaveSentFinishAudio;
  aInfo.mHaveSentFinishVideo = mHaveSentFinishVideo;
}

DecodedStream::DecodedStream(
    MediaDecoderStateMachine* aStateMachine,
    nsMainThreadPtrHandle<SharedDummyTrack> aDummyTrack,
    CopyableTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks, double aVolume,
    double aPlaybackRate, bool aPreservesPitch,
    MediaQueue<AudioData>& aAudioQueue, MediaQueue<VideoData>& aVideoQueue,
    RefPtr<AudioDeviceInfo> aAudioDevice)
    : mOwnerThread(aStateMachine->OwnerThread()),
      mDummyTrack(std::move(aDummyTrack)),
      mWatchManager(this, mOwnerThread),
      mPlaying(false, "DecodedStream::mPlaying"),
      mPrincipalHandle(aStateMachine->OwnerThread(), PRINCIPAL_HANDLE_NONE,
                       "DecodedStream::mPrincipalHandle (Mirror)"),
      mCanonicalOutputPrincipal(aStateMachine->CanonicalOutputPrincipal()),
      mOutputTracks(std::move(aOutputTracks)),
      mVolume(aVolume),
      mPlaybackRate(aPlaybackRate),
      mPreservesPitch(aPreservesPitch),
      mAudioQueue(aAudioQueue),
      mVideoQueue(aVideoQueue),
      mAudioDevice(std::move(aAudioDevice)) {}

DecodedStream::~DecodedStream() {
  MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
}


RefPtr<DecodedStream::EndedPromise> DecodedStream::OnEnded(TrackType aType) {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome());

  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio()) {
    return mAudioEndedPromise;
  }
  if (aType == TrackInfo::kVideoTrack && mInfo.HasVideo()) {
    return mVideoEndedPromise;
  }
  return nullptr;
}

nsresult DecodedStream::Start(const TimeUnit& aStartTime,
                              const MediaInfo& aInfo) {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");

  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("StartTime=%" PRId64,
                                 aStartTime.ToMicroseconds());
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  LOG_DS(LogLevel::Debug, "Start() mStartTime=%" PRId64,
         aStartTime.ToMicroseconds());

  mStartTime.emplace(aStartTime);
  mLastOutputTime = TimeUnit::Zero();
  mInfo = aInfo;
  mPlaying = true;
  mPrincipalHandle.Connect(mCanonicalOutputPrincipal);
  mWatchManager.Watch(mPlaying, &DecodedStream::PlayingChanged);
  mAudibilityMonitor.emplace(
      mInfo.mAudio.mRate,
      StaticPrefs::dom_media_silence_duration_for_audibility());
  ConnectListener();

  class R : public Runnable {
   public:
    R(PlaybackInfoInit&& aInit,
      nsMainThreadPtrHandle<SharedDummyTrack> aDummyTrack,
      nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
      MozPromiseHolder<MediaSink::EndedPromise>&& aAudioEndedPromise,
      MozPromiseHolder<MediaSink::EndedPromise>&& aVideoEndedPromise,
      float aPlaybackRate, float aVolume, bool aPreservesPitch,
      nsISerialEventTarget* aDecoderThread)
        : Runnable("CreateDecodedStreamData"),
          mInit(std::move(aInit)),
          mDummyTrack(std::move(aDummyTrack)),
          mOutputTracks(std::move(aOutputTracks)),
          mAudioEndedPromise(std::move(aAudioEndedPromise)),
          mVideoEndedPromise(std::move(aVideoEndedPromise)),
          mPlaybackRate(aPlaybackRate),
          mVolume(aVolume),
          mPreservesPitch(aPreservesPitch),
          mDecoderThread(aDecoderThread) {}
    NS_IMETHOD Run() override {
      MOZ_ASSERT(NS_IsMainThread());
      RefPtr<ProcessedMediaTrack> audioOutputTrack;
      RefPtr<ProcessedMediaTrack> videoOutputTrack;
      for (const auto& track : mOutputTracks) {
        if (track->mType == MediaSegment::AUDIO) {
          MOZ_DIAGNOSTIC_ASSERT(
              !audioOutputTrack,
              "We only support capturing to one output track per kind");
          audioOutputTrack = track;
        } else if (track->mType == MediaSegment::VIDEO) {
          MOZ_DIAGNOSTIC_ASSERT(
              !videoOutputTrack,
              "We only support capturing to one output track per kind");
          videoOutputTrack = track;
        } else {
          MOZ_CRASH("Unknown media type");
        }
      }
      if (!mDummyTrack) {
        // No dummy track - no graph. This could be intentional as the owning
        // media element needs access to the tracks on main thread to set up
        // forwarding of them before playback starts. MDSM will re-create
        // DecodedStream once a dummy track is available. This effectively
        // halts playback for this DecodedStream.
        return NS_OK;
      }
      if ((audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
          (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
        // A track has been destroyed and we'll soon get re-created with a
        // proper one. This effectively halts playback for this DecodedStream.
        return NS_OK;
      }
      mData = MakeUnique<DecodedStreamData>(
          std::move(mInit), mDummyTrack->mTrack->Graph(),
          std::move(audioOutputTrack), std::move(videoOutputTrack),
          std::move(mAudioEndedPromise), std::move(mVideoEndedPromise),
          mPlaybackRate, mVolume, mPreservesPitch, mDecoderThread);
      return NS_OK;
    }
    UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }

   private:
    PlaybackInfoInit mInit;
    nsMainThreadPtrHandle<SharedDummyTrack> mDummyTrack;
    const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
    MozPromiseHolder<MediaSink::EndedPromise> mAudioEndedPromise;
    MozPromiseHolder<MediaSink::EndedPromise> mVideoEndedPromise;
    UniquePtr<DecodedStreamData> mData;
    const float mPlaybackRate;
    const float mVolume;
    const bool mPreservesPitch;
    const RefPtr<nsISerialEventTarget> mDecoderThread;
  };

  MozPromiseHolder<DecodedStream::EndedPromise> audioEndedHolder;
  MozPromiseHolder<DecodedStream::EndedPromise> videoEndedHolder;
  PlaybackInfoInit init{aStartTime, aInfo};
  nsCOMPtr<nsIRunnable> r =
      new R(std::move(init), mDummyTrack, mOutputTracks.Clone(),
            std::move(audioEndedHolder), std::move(videoEndedHolder),
            static_cast<float>(mPlaybackRate), static_cast<float>(mVolume),
            mPreservesPitch, mOwnerThread);
  SyncRunnable::DispatchToThread(GetMainThreadSerialEventTarget(), r);
  mData = static_cast<R*>(r.get())->ReleaseData();

  if (mData) {
    mAudioEndedPromise = mData->mAudioEndedPromise;
    mVideoEndedPromise = mData->mVideoEndedPromise;
    mOutputListener = mData->OnOutput().Connect(mOwnerThread, this,
                                                &DecodedStream::NotifyOutput);
    SendData();
  }
  return NS_OK;
}

void DecodedStream::Stop() {
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome(), "playback not started.");

  TRACE("DecodedStream::Stop");
  LOG_DS(LogLevel::Debug, "Stop()");

  DisconnectListener();
  ResetVideo(mPrincipalHandle);
  ResetAudio();
  mStartTime.reset();
  mAudioEndedPromise = nullptr;
  mVideoEndedPromise = nullptr;

  // Clear mData immediately when this playback session ends so we won't
  // send data to the wrong track in SendData() in next playback session.
  DestroyData(std::move(mData));

  mPrincipalHandle.DisconnectIfConnected();
  mWatchManager.Unwatch(mPlaying, &DecodedStream::PlayingChanged);
  mAudibilityMonitor.reset();
}

bool DecodedStream::IsStarted() const {
  AssertOwnerThread();
  return mStartTime.isSome();
}

bool DecodedStream::IsPlaying() const {
  AssertOwnerThread();
  return IsStarted() && mPlaying;
}

void DecodedStream::Shutdown() {
  AssertOwnerThread();
  mPrincipalHandle.DisconnectIfConnected();
  mWatchManager.Shutdown();
}

void DecodedStream::DestroyData(UniquePtr<DecodedStreamData>&& aData) {
  AssertOwnerThread();

  if (!aData) {
    return;
  }

  TRACE("DecodedStream::DestroyData");
  mOutputListener.Disconnect();

  aData->Close();
  NS_DispatchToMainThread(
      NS_NewRunnableFunction("DecodedStream::DestroyData",
                             [data = std::move(aData)]() { data->Forget(); }));
}

void DecodedStream::SetPlaying(bool aPlaying) {
  AssertOwnerThread();

  // Resume/pause matters only when playback started.
  if (mStartTime.isNothing()) {
    return;
  }

  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("Playing=%s", aPlaying ? "true" : "false");
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  LOG_DS(LogLevel::Debug, "playing (%d) -> (%d)", mPlaying.Ref(), aPlaying);
  mPlaying = aPlaying;
}

void DecodedStream::SetVolume(double aVolume) {
  AssertOwnerThread();
  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("Volume=%f", aVolume);
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  if (mVolume == aVolume) {
    return;
  }
  mVolume = aVolume;
  if (mData && mData->mAudioTrack) {
    mData->mAudioTrack->SetVolume(static_cast<float>(aVolume));
  }
}

void DecodedStream::SetPlaybackRate(double aPlaybackRate) {
  AssertOwnerThread();
  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("PlaybackRate=%f", aPlaybackRate);
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  if (mPlaybackRate == aPlaybackRate) {
    return;
  }
  mPlaybackRate = aPlaybackRate;
  if (mData && mData->mAudioTrack) {
    mData->mAudioTrack->SetPlaybackRate(static_cast<float>(aPlaybackRate));
  }
}

void DecodedStream::SetPreservesPitch(bool aPreservesPitch) {
  AssertOwnerThread();
  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("PreservesPitch=%s",
                                 aPreservesPitch ? "true" : "false");
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  if (mPreservesPitch == aPreservesPitch) {
    return;
  }
  mPreservesPitch = aPreservesPitch;
  if (mData && mData->mAudioTrack) {
    mData->mAudioTrack->SetPreservesPitch(aPreservesPitch);
  }
}

RefPtr<GenericPromise> DecodedStream::SetAudioDevice(
    RefPtr<AudioDeviceInfo> aDevice) {
  // All audio is captured, so nothing is actually played out, so nothing to do.
  return GenericPromise::CreateAndResolve(true, __func__);
}

double DecodedStream::PlaybackRate() const {
  AssertOwnerThread();
  return mPlaybackRate;
}

void DecodedStream::SendAudio(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mInfo.HasAudio()) {
    return;
  }

  if (mData->mHaveSentFinishAudio) {
    return;
  }

  TRACE("DecodedStream::SendAudio");
  // It's OK to hold references to the AudioData because AudioData
  // is ref-counted.
  AutoTArray<RefPtr<AudioData>, 10> audio;
  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);

  // This will happen every time the media sink switches from `AudioSink` to
  // `DecodedStream`. If we don't insert the silence, then the A/V will be out
  // of sync.
  RefPtr<AudioData> nextAudio = audio.IsEmpty() ? nullptr : audio[0];
  if (RefPtr<AudioData> silence = CreateSilenceDataIfGapExists(nextAudio)) {
    LOG_DS(LogLevel::Verbose, "Detect a gap in audio, insert silence=%u",
           silence->Frames());
    audio.InsertElementAt(0, silence);
  }

  // Append data which hasn't been sent to audio track before.
  mData->mAudioTrack->AppendData(audio, aPrincipalHandle);
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    CheckIsDataAudible(audio[i]);
    mData->mNextAudioTime = audio[i]->GetEndTime();
    mData->mAudioFramesWritten += audio[i]->Frames();
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
    mData->mAudioTrack->NotifyEndOfStream();
    mData->mHaveSentFinishAudio = true;
  }
}

already_AddRefed<AudioData> DecodedStream::CreateSilenceDataIfGapExists(
    RefPtr<AudioData>& aNextAudio) {
  AssertOwnerThread();
  if (!aNextAudio) {
    return nullptr;
  }
  CheckedInt64 audioWrittenOffset =
      mData->mAudioFramesWritten +
      TimeUnitToFrames(*mStartTime, aNextAudio->mRate);
  CheckedInt64 frameOffset =
      TimeUnitToFrames(aNextAudio->mTime, aNextAudio->mRate);
  if (audioWrittenOffset.value() >= frameOffset.value()) {
    return nullptr;
  }
  // We've written less audio than our frame offset; return silence data so we
  // have enough audio to be at the correct offset for our current frames.
  CheckedInt64 missingFrames = frameOffset - audioWrittenOffset;
  AlignedAudioBuffer silenceBuffer(missingFrames.value() *
                                   aNextAudio->mChannels);
  if (!silenceBuffer) {
    NS_WARNING("OOM in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  auto duration = media::TimeUnit(missingFrames.value(), aNextAudio->mRate);
  if (!duration.IsValid()) {
    NS_WARNING("Int overflow in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  RefPtr<AudioData> silenceData = new AudioData(
      aNextAudio->mOffset, aNextAudio->mTime, std::move(silenceBuffer),
      aNextAudio->mChannels, aNextAudio->mRate);
  MOZ_DIAGNOSTIC_ASSERT(duration == silenceData->mDuration, "must be equal");
  return silenceData.forget();
}
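
// A worked example for CreateSilenceDataIfGapExists() (values assumed for
// illustration): with a 48 kHz stream started at t=0 and
// mAudioFramesWritten = 48000 (1 s written), a next packet stamped at
// t = 1.5 s gives frameOffset = 72000, so missingFrames = 72000 - 48000 =
// 24000, and 0.5 s of silence is prepended to keep A/V in sync.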

void DecodedStream::CheckIsDataAudible(const AudioData* aData) {
  MOZ_ASSERT(aData);

  mAudibilityMonitor->Process(aData);
  bool isAudible = mAudibilityMonitor->RecentlyAudible();

  if (isAudible != mIsAudioDataAudible) {
    mIsAudioDataAudible = isAudible;
    mAudibleEvent.Notify(mIsAudioDataAudible);
  }
}

void DecodedStreamData::WriteVideoToSegment(
    layers::Image* aImage, const TimeUnit& aStart, const TimeUnit& aEnd,
    const gfx::IntSize& aIntrinsicSize, const TimeStamp& aTimeStamp,
    VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle,
    double aPlaybackRate) {
  RefPtr<layers::Image> image = aImage;
  aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                       aTimeStamp, media::TimeUnit::Invalid(), aStart);
  // Extend this so we get accurate durations for all frames.
  // Because this track is pushed, we need durations so the graph can track
  // when playout of the track has finished.
  MOZ_ASSERT(aPlaybackRate > 0);
  TrackTime start = aStart.ToTicksAtRate(mVideoTrack->mSampleRate);
  TrackTime end = aEnd.ToTicksAtRate(mVideoTrack->mSampleRate);
  aOutput->ExtendLastFrameBy(
      static_cast<TrackTime>((float)(end - start) / aPlaybackRate));

  mLastVideoStartTime = Some(aStart);
  mLastVideoEndTime = Some(aEnd);
  mLastVideoTimeStamp = aTimeStamp;
}
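
// A worked example of the playback-rate scaling above (values assumed for
// illustration): with a 48000 Hz track rate, aStart = 0 s and aEnd = 1 s give
// end - start = 48000 ticks; at aPlaybackRate = 2.0 the last frame is
// extended by 24000 ticks, so it plays out in half the track time.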

static bool ZeroDurationAtLastChunk(VideoSegment& aInput) {
  // Get the last video frame's start time in VideoSegment aInput.
  // If the start time is equal to the duration of aInput, it means the last
  // video frame's duration is zero.
  TrackTime lastVideoStartTime;
  aInput.GetLastFrame(&lastVideoStartTime);
  return lastVideoStartTime == aInput.GetDuration();
}
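
// For context: WriteVideoToSegment() above first appends a frame without a
// duration and only then extends it, so a segment whose last frame was never
// extended reports a last start time equal to its total duration; that is
// the case this helper detects.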

void DecodedStream::ResetAudio() {
  AssertOwnerThread();

  if (!mData) {
    return;
  }

  if (!mInfo.HasAudio()) {
    return;
  }

  TRACE("DecodedStream::ResetAudio");
  mData->mAudioTrack->ClearFutureData();
  if (const RefPtr<AudioData>& v = mAudioQueue.PeekFront()) {
    mData->mNextAudioTime = v->mTime;
    mData->mHaveSentFinishAudio = false;
  }
}

void DecodedStream::ResetVideo(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mData) {
    return;
  }

  if (!mInfo.HasVideo()) {
    return;
  }

  TRACE("DecodedStream::ResetVideo");
  TrackTime cleared = mData->mVideoTrack->ClearFutureData();
  mData->mVideoTrackWritten -= cleared;
  if (mData->mHaveSentFinishVideo && cleared > 0) {
    mData->mHaveSentFinishVideo = false;
    mData->mListener->EndVideoTrackAt(mData->mVideoTrack, TRACK_TIME_MAX);
  }

  VideoSegment resetter;
  TimeStamp currentTime;
  TimeUnit currentPosition = GetPosition(&currentTime);

  // Giving direct consumers a frame (really *any* frame, so in this case:
  // nullptr) at an earlier time than the previous, will signal to that consumer
  // to discard any frames ahead in time of the new frame. To be honest, this is
  // an ugly hack because the direct listeners of the MediaTrackGraph do not
  // have an API that supports clearing the future frames. ImageContainer and
  // VideoFrameContainer do though, and we will need to move to a similar API
  // for video tracks as part of bug 1493618.
  resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                       aPrincipalHandle, false, currentTime);
  mData->mVideoTrack->AppendData(&resetter);

  // Consumer buffers have been reset. We now set the next time to the start
  // time of the current frame, so that it can be displayed again on resuming.
  if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
    mData->mLastVideoStartTime = Some(v->mTime - TimeUnit::FromMicroseconds(1));
    mData->mLastVideoEndTime = Some(v->mTime);
  } else {
    // There was no current frame in the queue. We set the next time to the
    // current time, so we at least don't resume starting in the future.
    mData->mLastVideoStartTime =
        Some(currentPosition - TimeUnit::FromMicroseconds(1));
    mData->mLastVideoEndTime = Some(currentPosition);
  }

  mData->mLastVideoTimeStamp = currentTime;
}

void DecodedStream::SendVideo(const PrincipalHandle& aPrincipalHandle) {
  AssertOwnerThread();

  if (!mInfo.HasVideo()) {
    return;
  }

  if (mData->mHaveSentFinishVideo) {
    return;
  }

  TRACE("DecodedStream::SendVideo");
  VideoSegment output;
  AutoTArray<RefPtr<VideoData>, 10> video;

  // It's OK to hold references to the VideoData because VideoData
  // is ref-counted.
  mVideoQueue.GetElementsAfter(
      mData->mLastVideoStartTime.valueOr(mStartTime.ref()), &video);

  TimeStamp currentTime;
  TimeUnit currentPosition = GetPosition(&currentTime);

  if (mData->mLastVideoTimeStamp.IsNull()) {
    mData->mLastVideoTimeStamp = currentTime;
  }

  for (uint32_t i = 0; i < video.Length(); ++i) {
    VideoData* v = video[i];
    TimeUnit lastStart = mData->mLastVideoStartTime.valueOr(
        mStartTime.ref() - TimeUnit::FromMicroseconds(1));
    TimeUnit lastEnd = mData->mLastVideoEndTime.valueOr(mStartTime.ref());

    if (lastEnd < v->mTime) {
      // Write last video frame to catch up. mLastVideoImage can be null here
      // which is fine, it just means there's no video.

      // TODO: |mLastVideoImage| should come from the last image rendered
      // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
      // video frame). E.g. if we have a video frame that is 30 sec long
      // and capture happens at 15 sec, we'll have to append a black frame
      // that is 15 sec long.
      TimeStamp t =
          std::max(mData->mLastVideoTimeStamp,
                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
      mData->WriteVideoToSegment(mData->mLastVideoImage, lastEnd, v->mTime,
                                 mData->mLastVideoImageDisplaySize, t, &output,
                                 aPrincipalHandle, mPlaybackRate);
      lastEnd = v->mTime;
    }

    if (lastStart < v->mTime) {
      // This frame starts after the last frame's start. Note that this could be
      // before the last frame's end time for some videos. This only matters for
      // the track's lifetime in the MTG, as rendering is based on timestamps,
      // aka frame start times.
      TimeStamp t =
          std::max(mData->mLastVideoTimeStamp,
                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
      TimeUnit end = std::max(
          v->GetEndTime(),
          lastEnd + TimeUnit::FromMicroseconds(
                        mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
      mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
                                 &output, aPrincipalHandle, mPlaybackRate);
    }
  }

  // Check the output is not empty.
  bool compensateEOS = false;
  bool forceBlack = false;
  if (output.GetLastFrame()) {
    compensateEOS = ZeroDurationAtLastChunk(output);
  }

  if (output.GetDuration() > 0) {
    mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
  }

  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
    if (!mData->mLastVideoImage) {
      // We have video, but the video queue finished before we received any
      // frame. We insert a black frame to progress any consuming
      // HTMLMediaElement. This mirrors the behavior of VideoSink.

      // Force a frame - can be null
      compensateEOS = true;
      // Force frame to be black
      forceBlack = true;
      // Override the frame's size (will be 0x0 otherwise)
      mData->mLastVideoImageDisplaySize = mInfo.mVideo.mDisplay;
      LOG_DS(LogLevel::Debug, "No mLastVideoImage");
    }
    if (compensateEOS) {
      VideoSegment endSegment;
      auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
      mData->WriteVideoToSegment(
          mData->mLastVideoImage, start, start,
          mData->mLastVideoImageDisplaySize,
          currentTime + (start - currentPosition).ToTimeDuration(), &endSegment,
          aPrincipalHandle, mPlaybackRate);
      // ForwardedInputTrack drops zero duration frames, even at the end of
      // the track.  Give the frame a minimum duration so that it is not
      // dropped.
      endSegment.ExtendLastFrameBy(1);
      LOG_DS(LogLevel::Debug,
             "compensateEOS: start %s, duration %" PRId64
             ", mPlaybackRate %lf, sample rate %" PRId32,
             start.ToString().get(), endSegment.GetDuration(), mPlaybackRate,
             mData->mVideoTrack->mSampleRate);
      MOZ_ASSERT(endSegment.GetDuration() > 0);
      if (forceBlack) {
        endSegment.ReplaceWithDisabled();
      }
      mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
    }
    mData->mListener->EndVideoTrackAt(mData->mVideoTrack,
                                      mData->mVideoTrackWritten);
    mData->mHaveSentFinishVideo = true;
  }
}

void DecodedStream::SendData() {
  AssertOwnerThread();

  // Not yet created on the main thread. MDSM will try again later.
  if (!mData) {
    return;
  }

  if (!mPlaying) {
    return;
  }

  LOG_DS(LogLevel::Verbose, "SendData()");
  SendAudio(mPrincipalHandle);
  SendVideo(mPrincipalHandle);
}

TimeUnit DecodedStream::GetEndTime(TrackType aType) const {
  AssertOwnerThread();
  TRACE("DecodedStream::GetEndTime");
  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
    auto t = mStartTime.ref() +
             media::TimeUnit(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
    if (t.IsValid()) {
      return t;
    }
  } else if (aType == TrackInfo::kVideoTrack && mData) {
    return mData->mLastVideoEndTime.valueOr(mStartTime.ref());
  }
  return TimeUnit::Zero();
}
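
// Example (values assumed for illustration): with mStartTime = 0 and 96000
// audio frames written at mInfo.mAudio.mRate = 48000, GetEndTime(kAudioTrack)
// above yields 2 s; for video it falls back to the last video end time.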

TimeUnit DecodedStream::GetPosition(TimeStamp* aTimeStamp) {
  AssertOwnerThread();
  TRACE("DecodedStream::GetPosition");
  // This is only called after MDSM starts playback. So mStartTime is
  // guaranteed to be something.
  MOZ_ASSERT(mStartTime.isSome());
  if (aTimeStamp) {
    *aTimeStamp = TimeStamp::Now();
  }
  return mStartTime.ref() + mLastOutputTime;
}
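
// Example (values assumed for illustration): with mStartTime = 5 s and
// mLastOutputTime = 2 s of output reported by the graph, GetPosition()
// returns 7 s. NotifyOutput() below is what advances mLastOutputTime, and
// the assertion there keeps it monotonic.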

void DecodedStream::NotifyOutput(int64_t aTime) {
  AssertOwnerThread();
  TimeUnit time = TimeUnit::FromMicroseconds(aTime);
  if (time == mLastOutputTime) {
    return;
  }
  MOZ_ASSERT(mLastOutputTime < time);
  mLastOutputTime = time;
  auto currentTime = GetPosition();

  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("OutputTime=%" PRId64,
                                 currentTime.ToMicroseconds());
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  LOG_DS(LogLevel::Verbose, "time is now %" PRId64,
         currentTime.ToMicroseconds());

  // Remove audio samples that have been played by MTG from the queue.
  RefPtr<AudioData> a = mAudioQueue.PeekFront();
  for (; a && a->GetEndTime() <= currentTime;) {
    LOG_DS(LogLevel::Debug, "Dropping audio [%" PRId64 ",%" PRId64 "]",
           a->mTime.ToMicroseconds(), a->GetEndTime().ToMicroseconds());
    RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
    a = mAudioQueue.PeekFront();
  }
}

void DecodedStream::PlayingChanged() {
  AssertOwnerThread();
  TRACE("DecodedStream::PlayingChanged");

  if (!mPlaying) {
    // On seek or pause we discard future frames.
    ResetVideo(mPrincipalHandle);
    ResetAudio();
  }
}

void DecodedStream::ConnectListener() {
  AssertOwnerThread();

  mAudioPushListener = mAudioQueue.PushEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mVideoPushListener = mVideoQueue.PushEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mVideoFinishListener = mVideoQueue.FinishEvent().Connect(
      mOwnerThread, this, &DecodedStream::SendData);
  mWatchManager.Watch(mPlaying, &DecodedStream::SendData);
}

void DecodedStream::DisconnectListener() {
  AssertOwnerThread();

  mAudioPushListener.Disconnect();
  mVideoPushListener.Disconnect();
  mAudioFinishListener.Disconnect();
  mVideoFinishListener.Disconnect();
  mWatchManager.Unwatch(mPlaying, &DecodedStream::SendData);
}

void DecodedStream::GetDebugInfo(dom::MediaSinkDebugInfo& aInfo) {
  AssertOwnerThread();
  int64_t startTime = mStartTime.isSome() ? mStartTime->ToMicroseconds() : -1;
  aInfo.mDecodedStream.mInstance =
      NS_ConvertUTF8toUTF16(nsPrintfCString("%p", this));
  aInfo.mDecodedStream.mStartTime = startTime;
  aInfo.mDecodedStream.mLastOutputTime = mLastOutputTime.ToMicroseconds();
  aInfo.mDecodedStream.mPlaying = mPlaying.Ref();
  auto lastAudio = mAudioQueue.PeekBack();
  aInfo.mDecodedStream.mLastAudio =
      lastAudio ? lastAudio->GetEndTime().ToMicroseconds() : -1;
  aInfo.mDecodedStream.mAudioQueueFinished = mAudioQueue.IsFinished();
  aInfo.mDecodedStream.mAudioQueueSize =
      AssertedCast<int>(mAudioQueue.GetSize());
  if (mData) {
    mData->GetDebugInfo(aInfo.mDecodedStream.mData);
  }
}

#undef LOG_DS

}  // namespace mozilla
