/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "DecodedStream.h"

namespace mozilla {

using media::TimeUnit;

extern LazyLogModule gMediaDecoderLog;

#define LOG_DS(type, fmt, ...)    \
  MOZ_LOG(gMediaDecoderLog, type, \
          ("DecodedStream=%p " fmt, this, ##__VA_ARGS__))

#define PLAYBACK_PROFILER_MARKER(markerString) \
  PROFILER_MARKER_TEXT(FUNCTION_SIGNATURE, MEDIA_PLAYBACK, {}, markerString)
/*
 * A container class to make it easier to pass the playback info all the
 * way to DecodedStreamGraphListener from DecodedStream.
 */
struct PlaybackInfoInit {
  TimeUnit mStartTime;
  MediaInfo mInfo;
};
class DecodedStreamGraphListener;
class SourceVideoTrackListener : public MediaTrackListener {
 public:
  SourceVideoTrackListener(DecodedStreamGraphListener* aGraphListener,
                           SourceMediaTrack* aVideoTrack,
                           MediaTrack* aAudioTrack,
                           nsISerialEventTarget* aDecoderThread);

  void NotifyOutput(MediaTrackGraph* aGraph,
                    TrackTime aCurrentTrackTime) override;
  void NotifyEnded(MediaTrackGraph* aGraph) override;

 private:
  const RefPtr<DecodedStreamGraphListener> mGraphListener;
  const RefPtr<SourceMediaTrack> mVideoTrack;
  const RefPtr<const MediaTrack> mAudioTrack;
  const RefPtr<nsISerialEventTarget> mDecoderThread;
  TrackTime mLastVideoOutputTime = 0;
};
class DecodedStreamGraphListener {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStreamGraphListener)
 public:
  DecodedStreamGraphListener(
      nsISerialEventTarget* aDecoderThread,
      AudioDecoderInputTrack* aAudioTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
      SourceMediaTrack* aVideoTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder)
      : mDecoderThread(aDecoderThread),
        mVideoTrackListener(
            aVideoTrack ? MakeRefPtr<SourceVideoTrackListener>(
                              this, aVideoTrack, aAudioTrack, aDecoderThread)
                        : nullptr),
        mAudioEndedHolder(std::move(aAudioEndedHolder)),
        mVideoEndedHolder(std::move(aVideoEndedHolder)),
        mAudioTrack(aAudioTrack),
        mVideoTrack(aVideoTrack) {
    if (!mAudioTrack) {
      mAudioEnded = true;
      mAudioEndedHolder.ResolveIfExists(true, __func__);
    }
    if (!mVideoTrackListener) {
      mVideoEnded = true;
      mVideoEndedHolder.ResolveIfExists(true, __func__);
    }
  }
  void RegisterListeners() {
    if (mAudioTrack) {
      mOnAudioOutput = mAudioTrack->OnOutput().Connect(
          mDecoderThread,
          [self = RefPtr<DecodedStreamGraphListener>(this)](TrackTime aTime) {
            self->NotifyOutput(MediaSegment::AUDIO, aTime);
          });
      mOnAudioEnd = mAudioTrack->OnEnd().Connect(
          mDecoderThread, [self = RefPtr<DecodedStreamGraphListener>(this)]() {
            self->NotifyEnded(MediaSegment::AUDIO);
          });
    }
    if (mVideoTrackListener) {
      mVideoTrack->AddListener(mVideoTrackListener);
    }
  }
  static already_AddRefed<DecodedStreamGraphListener> Create(
      nsISerialEventTarget* aDecoderThread,
      AudioDecoderInputTrack* aAudioTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedHolder,
      SourceMediaTrack* aVideoTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedHolder) {
    RefPtr<DecodedStreamGraphListener> listener =
        new DecodedStreamGraphListener(aDecoderThread, aAudioTrack,
                                       std::move(aAudioEndedHolder),
                                       aVideoTrack,
                                       std::move(aVideoEndedHolder));
    listener->RegisterListeners();
    return listener.forget();
  }
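  // A minimal usage sketch for Create() (the caller-side variable names here
  // are hypothetical): the factory both constructs the listener and registers
  // its track listeners, so callers are not expected to call
  // RegisterListeners() themselves.
  //
  //   RefPtr<DecodedStreamGraphListener> listener =
  //       DecodedStreamGraphListener::Create(
  //           decoderThread, audioTrack, std::move(audioEndedHolder),
  //           videoTrack, std::move(videoEndedHolder));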
  void Close() {
    AssertOnDecoderThread();
    if (mAudioTrack) {
      mAudioTrack->Close();
    }
    if (mVideoTrack) {
      mVideoTrack->End();
    }
    mAudioEndedHolder.ResolveIfExists(false, __func__);
    mVideoEndedHolder.ResolveIfExists(false, __func__);
    mOnAudioOutput.DisconnectIfExists();
    mOnAudioEnd.DisconnectIfExists();
  }
  void NotifyOutput(MediaSegment::Type aType, TrackTime aCurrentTrackTime) {
    AssertOnDecoderThread();
    if (aType == MediaSegment::AUDIO) {
      mAudioOutputFrames = aCurrentTrackTime;
    } else if (aType == MediaSegment::VIDEO) {
      if (aCurrentTrackTime >= mVideoEndTime) {
        mVideoTrack->End();
      }
    } else {
      MOZ_CRASH("Unexpected track type");
    }
    MOZ_ASSERT_IF(aType == MediaSegment::AUDIO, !mAudioEnded);
    MOZ_ASSERT_IF(aType == MediaSegment::VIDEO, !mVideoEnded);
    if (aType == MediaSegment::VIDEO && !mAudioEnded) {
      // Only audio playout drives the clock forward, if present and live.
      return;
    }
    // This situation would happen when playing audio at a >1x playback rate,
    // because the audio output clock isn't aligned with the graph time and
    // advances faster. E.g. at 2x playback rate, when the graph passes 1s the
    // audio clock time actually already goes forward 2s. After the audio
    // track has ended, the video track would drive the clock, but the video
    // time still follows the graph time, which is smaller than the previous
    // audio clock time and should be ignored.
    if (aCurrentTrackTime <= mLastOutputTime) {
      MOZ_ASSERT(aType == MediaSegment::VIDEO);
      return;
    }
    MOZ_ASSERT(aCurrentTrackTime > mLastOutputTime);
    mLastOutputTime = aCurrentTrackTime;

    // Only when the audio track doesn't exist or has reached the end should
    // the video track drive the clock.
    MOZ_ASSERT_IF(aType == MediaSegment::VIDEO, mAudioEnded);
    const MediaTrack* track = aType == MediaSegment::VIDEO
                                  ? static_cast<MediaTrack*>(mVideoTrack)
                                  : static_cast<MediaTrack*>(mAudioTrack);
    mOnOutput.Notify(track->TrackTimeToMicroseconds(aCurrentTrackTime));
  }

  void NotifyEnded(MediaSegment::Type aType) {
    AssertOnDecoderThread();
    if (aType == MediaSegment::AUDIO) {
      MOZ_ASSERT(!mAudioEnded);
      mAudioEnded = true;
      mAudioEndedHolder.ResolveIfExists(true, __func__);
    } else if (aType == MediaSegment::VIDEO) {
      MOZ_ASSERT(!mVideoEnded);
      mVideoEnded = true;
      mVideoEndedHolder.ResolveIfExists(true, __func__);
    } else {
      MOZ_CRASH("Unexpected track type");
    }
  }
  /**
   * Tell the graph listener to end the track sourced by the given track after
   * it has seen at least aEnd worth of output reported as processed by the
   * graph.
   *
   * A TrackTime of TRACK_TIME_MAX indicates that the track has no end and is
   * the default.
   *
   * This method of ending tracks is needed because the MediaTrackGraph
   * processes ended tracks (through SourceMediaTrack::EndTrack) at the
   * beginning of an iteration, but waits until the end of the iteration to
   * process any ControlMessages. When such a ControlMessage is a listener
   * that is to be added to a track that has ended in its very first
   * iteration, the track ends before the listener tracking this ending is
   * added. This can lead to a MediaStreamTrack ending on main thread (it uses
   * another listener) before the listeners to render the track get added,
   * potentially meaning a media element doesn't progress before reaching the
   * end although data was available.
   */
  void EndVideoTrackAt(MediaTrack* aTrack, TrackTime aEnd) {
    AssertOnDecoderThread();
    MOZ_DIAGNOSTIC_ASSERT(aTrack == mVideoTrack);
    mVideoEndTime = aEnd;
  }
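  // Illustrative call, mirroring the call site in SendVideo() below: once all
  // frames have been appended, the video track is told to end as soon as the
  // graph reports that much output as processed:
  //
  //   mData->mListener->EndVideoTrackAt(mData->mVideoTrack,
  //                                     mData->mVideoTrackWritten);
  //
  // Until such a call, mVideoEndTime stays at TRACK_TIME_MAX and the track is
  // never ended from NotifyOutput().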
  void Forget() {
    MOZ_ASSERT(NS_IsMainThread());
    if (mVideoTrackListener && !mVideoTrack->IsDestroyed()) {
      mVideoTrack->RemoveListener(mVideoTrackListener);
    }
    mVideoTrackListener = nullptr;
  }

  TrackTime GetAudioFramesPlayed() {
    AssertOnDecoderThread();
    return mAudioOutputFrames;
  }

  MediaEventSource<int64_t>& OnOutput() { return mOnOutput; }
 private:
  ~DecodedStreamGraphListener() {
    MOZ_ASSERT(mAudioEndedHolder.IsEmpty());
    MOZ_ASSERT(mVideoEndedHolder.IsEmpty());
  }

  void AssertOnDecoderThread() const {
    MOZ_ASSERT(mDecoderThread->IsOnCurrentThread());
  }

  const RefPtr<nsISerialEventTarget> mDecoderThread;

  // Decoder thread only.
  TrackTime mAudioOutputFrames = 0;
  TrackTime mLastOutputTime = 0;
  bool mAudioEnded = false;
  bool mVideoEnded = false;

  // Main thread only.
  RefPtr<SourceVideoTrackListener> mVideoTrackListener;

  // These can be resolved on the main thread on creation if there is no
  // corresponding track, otherwise they are resolved on the decoder thread.
  MozPromiseHolder<DecodedStream::EndedPromise> mAudioEndedHolder;
  MozPromiseHolder<DecodedStream::EndedPromise> mVideoEndedHolder;

  // Any thread.
  MediaEventProducer<int64_t> mOnOutput;
  const RefPtr<AudioDecoderInputTrack> mAudioTrack;
  const RefPtr<SourceMediaTrack> mVideoTrack;
  MediaEventListener mOnAudioOutput;
  MediaEventListener mOnAudioEnd;
  Atomic<TrackTime> mVideoEndTime{TRACK_TIME_MAX};
};
SourceVideoTrackListener::SourceVideoTrackListener(
    DecodedStreamGraphListener* aGraphListener, SourceMediaTrack* aVideoTrack,
    MediaTrack* aAudioTrack, nsISerialEventTarget* aDecoderThread)
    : mGraphListener(aGraphListener),
      mVideoTrack(aVideoTrack),
      mAudioTrack(aAudioTrack),
      mDecoderThread(aDecoderThread) {}
void SourceVideoTrackListener::NotifyOutput(MediaTrackGraph* aGraph,
                                            TrackTime aCurrentTrackTime) {
  aGraph->AssertOnGraphThreadOrNotRunning();
  if (mAudioTrack && !mAudioTrack->Ended()) {
    // Only audio playout drives the clock forward, if present and live.
    return;
  }
  // The graph can iterate without the track making progress, but output time
  // can never go backwards.
  if (aCurrentTrackTime <= mLastVideoOutputTime) {
    return;
  }
  mLastVideoOutputTime = aCurrentTrackTime;
  mDecoderThread->Dispatch(NS_NewRunnableFunction(
      "SourceVideoTrackListener::NotifyOutput",
      [self = RefPtr<SourceVideoTrackListener>(this), aCurrentTrackTime] {
        self->mGraphListener->NotifyOutput(MediaSegment::VIDEO,
                                           aCurrentTrackTime);
      }));
}

void SourceVideoTrackListener::NotifyEnded(MediaTrackGraph* aGraph) {
  aGraph->AssertOnGraphThreadOrNotRunning();
  mDecoderThread->Dispatch(NS_NewRunnableFunction(
      "SourceVideoTrackListener::NotifyEnded",
      [self = RefPtr<SourceVideoTrackListener>(this)] {
        self->mGraphListener->NotifyEnded(MediaSegment::VIDEO);
      }));
}
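// A sketch of the thread-hopping pattern used by both listener callbacks
// above (the callback name here is hypothetical): graph-thread notifications
// only touch decoder-thread state after a dispatch to mDecoderThread, and the
// [self = RefPtr<...>(this)] capture keeps the listener alive until the
// runnable has run.
//
//   void OnGraphCallback(TrackTime aTime) {  // graph thread
//     mDecoderThread->Dispatch(NS_NewRunnableFunction(
//         "OnGraphCallback", [self = RefPtr<SourceVideoTrackListener>(this),
//                             aTime] { /* decoder-thread work */ }));
//   }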
/**
 * All MediaStream-related data is protected by the decoder's monitor. We have
 * at most one DecodedStreamData per MediaDecoder. XXX Its tracks are used as
 * inputs for all output tracks created by OutputStreamManager after calls to
 * captureStream/UntilEnded. Seeking creates new source tracks, as does
 * replaying after the input has ended. In the latter case, the new sources
 * are not connected to tracks created by captureStreamUntilEnded.
 */
class DecodedStreamData final {
 public:
  DecodedStreamData(
      PlaybackInfoInit&& aInit, MediaTrackGraph* aGraph,
      RefPtr<ProcessedMediaTrack> aAudioOutputTrack,
      RefPtr<ProcessedMediaTrack> aVideoOutputTrack,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aAudioEndedPromise,
      MozPromiseHolder<DecodedStream::EndedPromise>&& aVideoEndedPromise,
      float aPlaybackRate, float aVolume, bool aPreservesPitch,
      nsISerialEventTarget* aDecoderThread);
  ~DecodedStreamData();
  MediaEventSource<int64_t>& OnOutput();
  // This is used to mark the track as closed and should be called before
  // Forget(). Decoder thread only.
  void Close();
  // After calling this function, the DecodedStreamData would be destroyed.
  // Main thread only.
  void Forget();
  void GetDebugInfo(dom::DecodedStreamDataDebugInfo& aInfo);

  void WriteVideoToSegment(layers::Image* aImage, const TimeUnit& aStart,
                           const TimeUnit& aEnd,
                           const gfx::IntSize& aIntrinsicSize,
                           const TimeStamp& aTimeStamp, VideoSegment* aOutput,
                           const PrincipalHandle& aPrincipalHandle,
                           double aPlaybackRate);
  /* The following group of fields are protected by the decoder's monitor
   * and can be read or written on any thread.
   */
  // Count of audio frames written to the track.
  int64_t mAudioFramesWritten;
  // Count of video frames written to the track in the track's rate.
  TrackTime mVideoTrackWritten;
  // mNextAudioTime is the end timestamp for the last packet sent to the track.
  // Therefore audio packets starting at or after this time need to be copied
  // to the output track.
  TimeUnit mNextAudioTime;
  // mLastVideoStartTime is the start timestamp for the last packet sent to
  // the track. Therefore video packets starting at or after this time need to
  // be copied to the output track.
  Maybe<TimeUnit> mLastVideoStartTime;
  // mLastVideoEndTime is the end timestamp for the last packet sent to the
  // track. It is used to adjust durations when there are overlaps in
  // VideoData.
  Maybe<TimeUnit> mLastVideoEndTime;
  // The timestamp of the last frame, so we can ensure time never goes
  // backwards.
  TimeStamp mLastVideoTimeStamp;
  // The last video image sent to the track. Useful if we need to replicate
  // the image.
  RefPtr<layers::Image> mLastVideoImage;
  gfx::IntSize mLastVideoImageDisplaySize;
  bool mHaveSentFinishAudio;
  bool mHaveSentFinishVideo;

  const RefPtr<AudioDecoderInputTrack> mAudioTrack;
  const RefPtr<SourceMediaTrack> mVideoTrack;
  const RefPtr<DecodedStreamGraphListener> mListener;
};
class R : public Runnable {
 public:
  R(PlaybackInfoInit&& aInit,
    nsMainThreadPtrHandle<SharedDummyTrack> aDummyTrack,
    nsTArray<RefPtr<ProcessedMediaTrack>> aOutputTracks,
    MozPromiseHolder<MediaSink::EndedPromise>&& aAudioEndedPromise,
    MozPromiseHolder<MediaSink::EndedPromise>&& aVideoEndedPromise,
    float aPlaybackRate, float aVolume, bool aPreservesPitch,
    nsISerialEventTarget* aDecoderThread)
      : Runnable("CreateDecodedStreamData"),
        mInit(std::move(aInit)),
        mDummyTrack(std::move(aDummyTrack)),
        mOutputTracks(std::move(aOutputTracks)),
        mAudioEndedPromise(std::move(aAudioEndedPromise)),
        mVideoEndedPromise(std::move(aVideoEndedPromise)),
        mPlaybackRate(aPlaybackRate),
        mVolume(aVolume),
        mPreservesPitch(aPreservesPitch),
        mDecoderThread(aDecoderThread) {}

  NS_IMETHOD Run() override {
    MOZ_ASSERT(NS_IsMainThread());
    RefPtr<ProcessedMediaTrack> audioOutputTrack;
    RefPtr<ProcessedMediaTrack> videoOutputTrack;
    for (const auto& track : mOutputTracks) {
      if (track->mType == MediaSegment::AUDIO) {
        MOZ_DIAGNOSTIC_ASSERT(
            !audioOutputTrack,
            "We only support capturing to one output track per kind");
        audioOutputTrack = track;
      } else if (track->mType == MediaSegment::VIDEO) {
        MOZ_DIAGNOSTIC_ASSERT(
            !videoOutputTrack,
            "We only support capturing to one output track per kind");
        videoOutputTrack = track;
      } else {
        MOZ_CRASH("Unknown media type");
      }
    }
    if (!mDummyTrack) {
      // No dummy track - no graph. This could be intentional as the owning
      // media element needs access to the tracks on main thread to set up
      // forwarding of them before playback starts. MDSM will re-create
      // DecodedStream once a dummy track is available. This effectively
      // halts playback for this DecodedStream.
      return NS_OK;
    }
    if ((audioOutputTrack && audioOutputTrack->IsDestroyed()) ||
        (videoOutputTrack && videoOutputTrack->IsDestroyed())) {
      // A track has been destroyed and we'll soon get re-created with a
      // proper one. This effectively halts playback for this DecodedStream.
      return NS_OK;
    }
    mData = MakeUnique<DecodedStreamData>(
        std::move(mInit), mDummyTrack->mTrack->Graph(),
        std::move(audioOutputTrack), std::move(videoOutputTrack),
        std::move(mAudioEndedPromise), std::move(mVideoEndedPromise),
        mPlaybackRate, mVolume, mPreservesPitch, mDecoderThread);
    return NS_OK;
  }

  UniquePtr<DecodedStreamData> ReleaseData() { return std::move(mData); }

 private:
  PlaybackInfoInit mInit;
  const nsMainThreadPtrHandle<SharedDummyTrack> mDummyTrack;
  const nsTArray<RefPtr<ProcessedMediaTrack>> mOutputTracks;
  MozPromiseHolder<MediaSink::EndedPromise> mAudioEndedPromise;
  MozPromiseHolder<MediaSink::EndedPromise> mVideoEndedPromise;
  const float mPlaybackRate;
  const float mVolume;
  const bool mPreservesPitch;
  const RefPtr<nsISerialEventTarget> mDecoderThread;
  UniquePtr<DecodedStreamData> mData;
};
  // Clear mData immediately when this playback session ends so we won't
  // send data to the wrong track in SendData() in the next playback session.
  DestroyData(std::move(mData));
RefPtr<GenericPromise> DecodedStream::SetAudioDevice(
    RefPtr<AudioDeviceInfo> aDevice) {
  // All audio is captured, so nothing is actually played out, so nothing to
  // do.
  return GenericPromise::CreateAndResolve(true, __func__);
}
TRACE("DecodedStream::SendAudio"); // It's OK to hold references to the AudioData because AudioData // is ref-counted. AutoTArray<RefPtr<AudioData>, 10> audio; mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
  // This will happen every time the media sink switches from `AudioSink` to
  // `DecodedStream`. If we don't insert the silence then the A/V will be out
  // of sync.
  RefPtr<AudioData> nextAudio = audio.IsEmpty() ? nullptr : audio[0];
  if (RefPtr<AudioData> silence = CreateSilenceDataIfGapExists(nextAudio)) {
    LOG_DS(LogLevel::Verbose, "Detect a gap in audio, insert silence=%u",
           silence->Frames());
    audio.InsertElementAt(0, silence);
  }
  // Append data which hasn't been sent to the audio track before.
  mData->mAudioTrack->AppendData(audio, aPrincipalHandle);
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    CheckIsDataAudible(audio[i]);
    mData->mNextAudioTime = audio[i]->GetEndTime();
    mData->mAudioFramesWritten += audio[i]->Frames();
  }
already_AddRefed<AudioData> DecodedStream::CreateSilenceDataIfGapExists(
    RefPtr<AudioData>& aNextAudio) {
  AssertOwnerThread();
  if (!aNextAudio) {
    return nullptr;
  }
  CheckedInt64 audioWrittenOffset =
      mData->mAudioFramesWritten +
      TimeUnitToFrames(*mStartTime, aNextAudio->mRate);
  CheckedInt64 frameOffset =
      TimeUnitToFrames(aNextAudio->mTime, aNextAudio->mRate);
  if (audioWrittenOffset.value() >= frameOffset.value()) {
    return nullptr;
  }
  // We've written less audio than our frame offset, so return silence data to
  // ensure we have enough audio to be at the correct offset for our current
  // frames.
  CheckedInt64 missingFrames = frameOffset - audioWrittenOffset;
  AlignedAudioBuffer silenceBuffer(missingFrames.value() *
                                   aNextAudio->mChannels);
  if (!silenceBuffer) {
    NS_WARNING("OOM in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  auto duration = media::TimeUnit(missingFrames.value(), aNextAudio->mRate);
  if (!duration.IsValid()) {
    NS_WARNING("Int overflow in DecodedStream::CreateSilenceDataIfGapExists");
    return nullptr;
  }
  RefPtr<AudioData> silenceData =
      new AudioData(aNextAudio->mOffset, aNextAudio->mTime,
                    std::move(silenceBuffer), aNextAudio->mChannels,
                    aNextAudio->mRate);
  MOZ_DIAGNOSTIC_ASSERT(duration == silenceData->mDuration, "must be equal");
  return silenceData.forget();
}
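// A worked example of the gap arithmetic above (numbers are illustrative):
// with mStartTime = 0, mAudioFramesWritten = 48000 and a 48 kHz stream,
// audioWrittenOffset is 48000 (one second written). If the next packet starts
// at mTime = 1.5 s, frameOffset is 72000, so 72000 - 48000 = 24000 frames of
// silence are inserted to land the packet at its correct offset.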
void DecodedStreamData::WriteVideoToSegment(
    layers::Image* aImage, const TimeUnit& aStart, const TimeUnit& aEnd,
    const gfx::IntSize& aIntrinsicSize, const TimeStamp& aTimeStamp,
    VideoSegment* aOutput, const PrincipalHandle& aPrincipalHandle,
    double aPlaybackRate) {
  RefPtr<layers::Image> image = aImage;
  aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
                       aTimeStamp, media::TimeUnit::Invalid(), aStart);
  // Extend this so we get accurate durations for all frames.
  // Because this track is pushed, we need durations so the graph can track
  // when playout of the track has finished.
  MOZ_ASSERT(aPlaybackRate > 0);
  TrackTime start = aStart.ToTicksAtRate(mVideoTrack->mSampleRate);
  TrackTime end = aEnd.ToTicksAtRate(mVideoTrack->mSampleRate);
  aOutput->ExtendLastFrameBy(
      static_cast<TrackTime>((float)(end - start) / aPlaybackRate));
}
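// Illustrative arithmetic for the scaling above: a frame spanning 100 ms at a
// 48 kHz track rate covers end - start = 4800 ticks; at aPlaybackRate = 2.0
// the appended duration becomes 4800 / 2.0 = 2400 ticks, so the graph plays
// the frame out in half the wall-clock time.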
static bool ZeroDurationAtLastChunk(VideoSegment& aInput) {
  // Get the last video frame's start time in VideoSegment aInput.
  // If the start time is equal to the duration of aInput, it means the last
  // video frame's duration is zero.
  TrackTime lastVideoStartTime;
  aInput.GetLastFrame(&lastVideoStartTime);
  return lastVideoStartTime == aInput.GetDuration();
}
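// For example, a segment with total duration 100 ticks whose last frame was
// appended at tick 100 has a zero-duration last frame, so this returns true;
// if the last frame started at tick 80 it still has 20 ticks of playout left
// and this returns false.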
  // Giving direct consumers a frame (really *any* frame, so in this case:
  // nullptr) at an earlier time than the previous, will signal to that
  // consumer to discard any frames ahead in time of the new frame. To be
  // honest, this is an ugly hack because the direct listeners of the
  // MediaTrackGraph do not have an API that supports clearing the future
  // frames. ImageContainer and VideoFrameContainer do though, and we will
  // need to move to a similar API for video tracks as part of bug 1493618.
  resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
                       aPrincipalHandle, false, currentTime);
  mData->mVideoTrack->AppendData(&resetter);
  // Consumer buffers have been reset. We now set the next time to the start
  // time of the current frame, so that it can be displayed again on resuming.
  if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
    mData->mLastVideoStartTime = Some(v->mTime - TimeUnit::FromMicroseconds(1));
    mData->mLastVideoEndTime = Some(v->mTime);
  } else {
    // There was no current frame in the queue. We set the next time to the
    // current time, so we at least don't resume starting in the future.
    mData->mLastVideoStartTime =
        Some(currentPosition - TimeUnit::FromMicroseconds(1));
    mData->mLastVideoEndTime = Some(currentPosition);
  }
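  // Note on the 1 microsecond subtraction above: SendVideo() only re-appends
  // a frame when lastStart < v->mTime, so rewinding mLastVideoStartTime to
  // just below the frame's start time is what makes the same frame eligible
  // to be written again after the reset.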
  // It's OK to hold references to the VideoData because VideoData
  // is ref-counted.
  mVideoQueue.GetElementsAfter(
      mData->mLastVideoStartTime.valueOr(mStartTime.ref()), &video);
  if (mData->mLastVideoTimeStamp.IsNull()) {
    mData->mLastVideoTimeStamp = currentTime;
  }
  for (uint32_t i = 0; i < video.Length(); ++i) {
    VideoData* v = video[i];
    TimeUnit lastStart = mData->mLastVideoStartTime.valueOr(
        mStartTime.ref() - TimeUnit::FromMicroseconds(1));
    TimeUnit lastEnd = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
    if (lastEnd < v->mTime) {
      // Write the last video frame to catch up. mLastVideoImage can be null
      // here which is fine, it just means there's no video.

      // TODO: |mLastVideoImage| should come from the last image rendered
      // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
      // video frame). E.g. if we have a video frame that is 30 sec long
      // and capture happens at 15 sec, we'll have to append a black frame
      // that is 15 sec long.
      TimeStamp t =
          std::max(mData->mLastVideoTimeStamp,
                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
      mData->WriteVideoToSegment(mData->mLastVideoImage, lastEnd, v->mTime,
                                 mData->mLastVideoImageDisplaySize, t, &output,
                                 aPrincipalHandle, mPlaybackRate);
      lastEnd = v->mTime;
    }
    if (lastStart < v->mTime) {
      // This frame starts after the last frame's start. Note that this could
      // be before the last frame's end time for some videos. This only
      // matters for the track's lifetime in the MTG, as rendering is based on
      // timestamps, aka frame start times.
      TimeStamp t =
          std::max(mData->mLastVideoTimeStamp,
                   currentTime + (lastEnd - currentPosition).ToTimeDuration());
      TimeUnit end = std::max(
          v->GetEndTime(),
          lastEnd + TimeUnit::FromMicroseconds(
                        mData->mVideoTrack->TrackTimeToMicroseconds(1) + 1));
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
      mData->WriteVideoToSegment(v->mImage, lastEnd, end, v->mDisplay, t,
                                 &output, aPrincipalHandle, mPlaybackRate);
    }
  }
  // Check the output is not empty.
  bool compensateEOS = false;
  bool forceBlack = false;
  if (output.GetLastFrame()) {
    compensateEOS = ZeroDurationAtLastChunk(output);
  }
  if (output.GetDuration() > 0) {
    mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&output);
  }
  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
    if (!mData->mLastVideoImage) {
      // We have video, but the video queue finished before we received any
      // frame. We insert a black frame to progress any consuming
      // HTMLMediaElement. This mirrors the behavior of VideoSink.

      // Force a frame - can be null
      compensateEOS = true;
      // Force frame to be black
      forceBlack = true;
      // Override the frame's size (will be 0x0 otherwise)
      mData->mLastVideoImageDisplaySize = mInfo.mVideo.mDisplay;
      LOG_DS(LogLevel::Debug, "No mLastVideoImage");
    }
    if (compensateEOS) {
      VideoSegment endSegment;
      auto start = mData->mLastVideoEndTime.valueOr(mStartTime.ref());
      mData->WriteVideoToSegment(
          mData->mLastVideoImage, start, start,
          mData->mLastVideoImageDisplaySize,
          currentTime + (start - currentPosition).ToTimeDuration(),
          &endSegment, aPrincipalHandle, mPlaybackRate);
      // ForwardedInputTrack drops zero duration frames, even at the end of
      // the track. Give the frame a minimum duration so that it is not
      // dropped.
      endSegment.ExtendLastFrameBy(1);
      LOG_DS(LogLevel::Debug,
             "compensateEOS: start %s, duration %" PRId64
             ", mPlaybackRate %lf, sample rate %" PRId32,
             start.ToString().get(), endSegment.GetDuration(), mPlaybackRate,
             mData->mVideoTrack->mSampleRate);
      MOZ_ASSERT(endSegment.GetDuration() > 0);
      if (forceBlack) {
        endSegment.ReplaceWithDisabled();
      }
      mData->mVideoTrackWritten += mData->mVideoTrack->AppendData(&endSegment);
    }
    mData->mListener->EndVideoTrackAt(mData->mVideoTrack,
                                      mData->mVideoTrackWritten);
    mData->mHaveSentFinishVideo = true;
  }
}
TimeUnit DecodedStream::GetEndTime(TrackType aType) const {
  AssertOwnerThread();
  TRACE("DecodedStream::GetEndTime");
  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
    auto t = mStartTime.ref() +
             media::TimeUnit(mData->mAudioFramesWritten, mInfo.mAudio.mRate);
    if (t.IsValid()) {
      return t;
    }
  } else if (aType == TrackInfo::kVideoTrack && mData) {
    return mData->mLastVideoEndTime.valueOr(mStartTime.ref());
  }
  return TimeUnit::Zero();
}
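// Illustrative numbers for the audio branch above: with mStartTime = 2 s,
// mAudioFramesWritten = 96000 and mInfo.mAudio.mRate = 48000, the audio end
// time is 2 s + 96000 / 48000 s = 4 s.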
TimeUnit DecodedStream::GetPosition(TimeStamp* aTimeStamp) {
  AssertOwnerThread();
  TRACE("DecodedStream::GetPosition");
  // This is only called after MDSM starts playback. So mStartTime is
  // guaranteed to be something.
  MOZ_ASSERT(mStartTime.isSome());
  if (aTimeStamp) {
    *aTimeStamp = TimeStamp::Now();
  }
  return mStartTime.ref() + mLastOutputTime;
}
void DecodedStream::NotifyOutput(int64_t aTime) {
  AssertOwnerThread();
  TimeUnit time = TimeUnit::FromMicroseconds(aTime);
  if (time == mLastOutputTime) {
    return;
  }
  MOZ_ASSERT(mLastOutputTime < time);
  mLastOutputTime = time;
  auto currentTime = GetPosition();
  if (profiler_thread_is_being_profiled_for_markers()) {
    nsPrintfCString markerString("OutputTime=%" PRId64,
                                 currentTime.ToMicroseconds());
    PLAYBACK_PROFILER_MARKER(markerString);
  }
  LOG_DS(LogLevel::Verbose, "time is now %" PRId64,
         currentTime.ToMicroseconds());
  // Remove audio samples that have been played by MTG from the queue.
  RefPtr<AudioData> a = mAudioQueue.PeekFront();
  for (; a && a->GetEndTime() <= currentTime;) {
    LOG_DS(LogLevel::Debug, "Dropping audio [%" PRId64 ",%" PRId64 "]",
           a->mTime.ToMicroseconds(), a->GetEndTime().ToMicroseconds());
    RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
    a = mAudioQueue.PeekFront();
  }
}