/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
void HTMLVideoElement::UpdateMediaSize(const nsIntSize& aSize) {
HTMLMediaElement::UpdateMediaSize(aSize); // If we have a clone target, we should update its size as well. if (mVisualCloneTarget) {
Maybe<nsIntSize> newSize = Some(aSize);
mVisualCloneTarget->Invalidate(ImageSizeChanged::Yes, newSize,
ForceInvalidate::Yes);
}
}
Maybe<CSSIntSize> HTMLVideoElement::GetVideoSize() const {
  // No size to report without an enabled video track.
  if (!mMediaInfo.HasVideo()) {
    return Nothing();
  }
  if (mDisableVideo) {
    return Nothing();
  }

  const auto& display = mMediaInfo.mVideo.mDisplay;
  CSSIntSize size;
  switch (mMediaInfo.mVideo.mRotation) {
    case VideoRotation::kDegree_90:
    case VideoRotation::kDegree_270:
      // A quarter-turn rotation swaps the reported width and height.
      size.width = display.height;
      size.height = display.width;
      break;
    case VideoRotation::kDegree_0:
    case VideoRotation::kDegree_180:
    default:
      size.width = display.width;
      size.height = display.height;
      break;
  }
  return Some(size);
}
// NOTE(review): orphaned fragment — the enclosing function's header and the
// declaration of `sz` are missing from this chunk (likely lost during
// extraction). Presumably this is the tail of an intrinsic-size getter: it
// prefers the VideoFrameContainer's current intrinsic size (more up to date)
// and falls back to `sz` when no container or no intrinsic size is available.
// TODO: recover the missing function header before compiling.
// Prefer the size of the container as it's more up to date.
return ToMaybeRef(mVideoFrameContainer.get())
    .map([&](auto& aVFC) { return aVFC.CurrentIntrinsicSize().valueOr(sz); })
    .valueOr(sz);
}
uint32_t HTMLVideoElement::MozParsedFrames() const {
MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread."); if (!IsVideoStatsEnabled()) { return 0;
}
if (OwnerDoc()->ShouldResistFingerprinting(
RFPTarget::VideoElementMozFrames)) { return nsRFPService::GetSpoofedTotalFrames(TotalPlayTime());
}
uint32_t HTMLVideoElement::MozDecodedFrames() const {
MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread."); if (!IsVideoStatsEnabled()) { return 0;
}
if (OwnerDoc()->ShouldResistFingerprinting(
RFPTarget::VideoElementMozFrames)) { return nsRFPService::GetSpoofedTotalFrames(TotalPlayTime());
}
uint32_t HTMLVideoElement::MozPresentedFrames() {
MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread."); if (!IsVideoStatsEnabled()) { return 0;
}
if (OwnerDoc()->ShouldResistFingerprinting(
RFPTarget::VideoElementMozFrames)) { return nsRFPService::GetSpoofedPresentedFrames(TotalPlayTime(),
VideoWidth(), VideoHeight());
}
double HTMLVideoElement::MozFrameDelay() {
MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread.");
if (!IsVideoStatsEnabled() || OwnerDoc()->ShouldResistFingerprinting(
RFPTarget::VideoElementMozFrameDelay)) { return 0.0;
}
VideoFrameContainer* container = GetVideoFrameContainer(); // Hide negative delays. Frame timing tweaks in the compositor (e.g. // adding a bias value to prevent multiple dropped/duped frames when // frame times are aligned with composition times) may produce apparent // negative delay, but we shouldn't report that. return container ? std::max(0.0, container->GetFrameDelay()) : 0.0;
}
bool HTMLVideoElement::MozHasAudio() const {
MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread."); return HasAudio();
}
bool HTMLVideoElement::ShouldCreateVideoWakeLock() const { if (!StaticPrefs::media_video_wakelock()) { returnfalse;
} // Only request wake lock for video with audio or video from media // stream, because non-stream video without audio is often used as a // background image. // // Some web conferencing sites route audio outside the video element, // and would not be detected unless we check for media stream, so do // that below. // // Media streams generally aren't used as background images, though if // they were we'd get false positives. If this is an issue, we could // check for media stream AND document has audio playing (but that was // tricky to do). return HasVideo() && (mSrcStream || HasAudio());
}
void HTMLVideoElement::CreateVideoWakeLockIfNeeded() { if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdownConfirmed)) { return;
} if (!mScreenWakeLock && ShouldCreateVideoWakeLock()) {
RefPtr<power::PowerManagerService> pmService =
power::PowerManagerService::GetInstance();
NS_ENSURE_TRUE_VOID(pmService);
bool HTMLVideoElement::SetVisualCloneTarget(
    RefPtr<HTMLVideoElement> aVisualCloneTarget,
    RefPtr<Promise> aVisualCloneTargetPromise) {
  // Record the element (and optional completion promise) that should receive
  // this element's video frames. Returns true on success.
  MOZ_DIAGNOSTIC_ASSERT(
      !aVisualCloneTarget || aVisualCloneTarget->IsInComposedDoc(),
      "Can't set the clone target to a disconnected video "
      "element.");
  MOZ_DIAGNOSTIC_ASSERT(!mVisualCloneSource,
                        "Can't clone a video element that is already a clone.");
  if (!aVisualCloneTarget ||
      (aVisualCloneTarget->IsInComposedDoc() && !mVisualCloneSource)) {
    mVisualCloneTarget = std::move(aVisualCloneTarget);
    mVisualCloneTargetPromise = std::move(aVisualCloneTargetPromise);
    // Fixed glued token: was "returntrue;" (would not compile).
    return true;
  }
  // Fixed glued token: was "returnfalse;" (would not compile).
  return false;
}
bool HTMLVideoElement::SetVisualCloneSource(
    RefPtr<HTMLVideoElement> aVisualCloneSource) {
  // Record the element whose video frames this element displays.
  // Returns true on success.
  MOZ_DIAGNOSTIC_ASSERT(
      !aVisualCloneSource || aVisualCloneSource->IsInComposedDoc(),
      "Can't set the clone source to a disconnected video "
      "element.");
  MOZ_DIAGNOSTIC_ASSERT(!mVisualCloneTarget,
                        "Can't clone a video element that is already a "
                        "clone.");
  if (!aVisualCloneSource ||
      (aVisualCloneSource->IsInComposedDoc() && !mVisualCloneTarget)) {
    mVisualCloneSource = std::move(aVisualCloneSource);
    // Fixed glued token: was "returntrue;" (would not compile).
    return true;
  }
  // Fixed glued token: was "returnfalse;" (would not compile).
  return false;
}
double HTMLVideoElement::TotalPlayTime() const {
  // Sum the durations of all completed play ranges, plus the still-open
  // current range (if any).
  double total = 0.0;

  if (mPlayed) {
    const uint32_t rangeCount = mPlayed->Length();
    for (uint32_t idx = 0; idx < rangeCount; ++idx) {
      total += mPlayed->End(idx) - mPlayed->Start(idx);
    }

    // -1.0 marks "no play range currently open".
    if (mCurrentPlayRangeStart != -1.0) {
      const double now = CurrentTime();
      if (now != mCurrentPlayRangeStart) {
        total += now - mCurrentPlayRangeStart;
      }
    }
  }

  return total;
}
already_AddRefed<Promise> HTMLVideoElement::CloneElementVisually(
    HTMLVideoElement& aTargetVideo, ErrorResult& aRv) {
  // Start mirroring this element's frames onto aTargetVideo; the returned
  // promise resolves once the clone is established.
  MOZ_ASSERT(IsInComposedDoc(),
             "Can't clone a video that's not bound to a DOM tree.");
  MOZ_ASSERT(aTargetVideo.IsInComposedDoc(),
             "Can't clone to a video that's not bound to a DOM tree.");
  if (!IsInComposedDoc() || !aTargetVideo.IsInComposedDoc()) {
    aRv.Throw(NS_ERROR_UNEXPECTED);
    return nullptr;
  }

  // BUG FIX: `promise` was used below but never declared in this chunk.
  // Create the promise we hand to SetVisualCloneTarget and return to the
  // caller — confirm against the upstream implementation.
  RefPtr<Promise> promise = CreateDOMPromise(aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  // Do we already have a visual clone target? If so, shut it down.
  if (mVisualCloneTarget) {
    EndCloningVisually();
  }

  // If there's a poster set on the target video, clear it, otherwise
  // it'll display over top of the cloned frames.
  aTargetVideo.UnsetHTMLAttr(nsGkAtoms::poster, aRv);
  if (aRv.Failed()) {
    return nullptr;
  }

  if (!SetVisualCloneTarget(&aTargetVideo, promise)) {
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }

  if (!aTargetVideo.SetVisualCloneSource(this)) {
    // Roll back our half of the link before failing.
    mVisualCloneTarget = nullptr;
    aRv.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }

  aTargetVideo.SetMediaInfo(mMediaInfo);

  if (IsInComposedDoc() && !StaticPrefs::media_cloneElementVisually_testing()) {
    NotifyUAWidgetSetupOrChange();
  }

  MaybeBeginCloningVisually();

  return promise.forget();
}
void HTMLVideoElement::StopCloningElementVisually() {
  // No-op unless a visual clone is currently active.
  if (!mVisualCloneTarget) {
    return;
  }
  EndCloningVisually();
}
void HTMLVideoElement::MaybeBeginCloningVisually() { if (!mVisualCloneTarget) { return;
}
if (IsInComposedDoc() && !StaticPrefs::media_cloneElementVisually_testing()) {
NotifyUAWidgetSetupOrChange();
}
}
void HTMLVideoElement::OnSecondaryVideoContainerInstalled(
    const RefPtr<VideoFrameContainer>& aSecondaryContainer) {
  // Called when the compositor-side secondary container for visual cloning
  // has been installed; validates it belongs to the current clone target.
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_DIAGNOSTIC_ASSERT_IF(mVisualCloneTargetPromise, mVisualCloneTarget);
  if (!mVisualCloneTargetPromise) {
    // Clone target was unset.
    return;
  }

  VideoFrameContainer* container = mVisualCloneTarget->GetVideoFrameContainer();
  if (NS_WARN_IF(container != aSecondaryContainer)) {
    // Not the right container.
    return;
  }

  // NOTE(review): everything below appears to be spliced in from a different
  // function (visibility-change handling of non-audible autoplay video):
  // `aNewVisibility` is not a parameter of this function, and the expected
  // tail of this function (resolving mVisualCloneTargetPromise) is missing.
  // Recover both original bodies before compiling.

  // See the alternative part after step 4, but we only pause/resume invisible
  // autoplay for non-audible video, which is different from the spec. This
  // behavior seems aiming to reduce the power consumption without interering
  // users, and Chrome and Safari also chose to do that only for non-audible
  // video, so we want to match them in order to reduce webcompat issue.
  // https://html.spec.whatwg.org/multipage/media.html#ready-states:eligible-for-autoplay-2
  if (!HasAttr(nsGkAtoms::autoplay) || IsAudible()) {
    return;
  }

  // We need to consider the Pip window as well, which won't reflect in the
  // visibility event.
  if ((aNewVisibility == Visibility::ApproximatelyNonVisible &&
       !IsCloningElementVisually()) &&
      mCanAutoplayFlag) {
    LOG("pause non-audible autoplay video when it's invisible");
    PauseInternal();
    mCanAutoplayFlag = true;
    return;
  }
}
// Attempt to find the next image to be presented on this tick. Note that // composited will be accurate only if the element is visible.
AutoTArray<ImageContainer::OwningImage, 4> images; if (RefPtr<layers::ImageContainer> container = GetImageContainer()) {
container->GetCurrentImages(&images);
}
// If we did not find any current images, we must have fired too early, or we // are in the process of shutting down. Wait for the next invalidation. if (images.IsEmpty()) { return;
}
// We are guaranteed that the images are in timestamp order. It is possible we // are already behind if the compositor notifications have not been processed // yet, so as per the standard, this is a best effort attempt at synchronizing // with the state of the GPU process. const ImageContainer::OwningImage* selected = nullptr; bool composited = false; for (constauto& image : images) { if (image.mTimeStamp <= aNowTime) { // Image should already have been composited. Because we might not be in // the display list, we cannot rely upon its mComposited status, and // should just assume it has indeed been composited.
selected = ℑ
composited = true;
} elseif (!aNextTickTime || image.mTimeStamp <= aNextTickTime.ref()) { // Image should be the next to be composited. mComposited will be false // if the compositor hasn't rendered the frame yet or notified us of the // render yet, but it is in progress. If it is true, then we know the // next vsync will display the frame.
selected = ℑ
composited = false;
} else { // Image is for a future composition. break;
}
}
// If all of the available images are for future compositions, we must have // fired too early. Wait for the next invalidation. if (!selected || selected->mFrameID == layers::kContainerFrameID_Invalid ||
selected->mFrameID <= mLastPresentedFrameID) { return;
}
// If we have got a dummy frame, then we must have suspended decoding and have // no actual frame to present. This should only happen if we raced on // requesting a callback, and the media state machine advancing.
gfx::IntSize frameSize = selected->mImage->GetSize(); if (NS_WARN_IF(frameSize.IsEmpty())) { return;
}
// If we have already displayed the expected frame, we need to make the // display time match the presentation time to indicate it is already // complete. if (composited) {
aMd.mExpectedDisplayTime = aMd.mPresentationTime;
}
// If we were not provided a valid media time, then we need to estimate based // on the CurrentTime from the element.
aMd.mMediaTime = selected->mMediaTime.IsValid()
? selected->mMediaTime.ToSeconds()
: CurrentTime();
// If we have a processing duration, we need to round it. // // https://wicg.github.io/video-rvfc/#security-and-privacy // // 5. Security and Privacy Considerations. // ... processingDuration exposes some under-the-hood performance information // about the video pipeline ... We therefore propose a resolution of 100μs, // which is still useful for automated quality analysis, but doesn’t offer any // new sources of high resolution information. if (selected->mProcessingDuration.IsValid()) {
aMd.mProcessingDuration.Construct(
selected->mProcessingDuration.ToBase(10000).ToSeconds());
}
#ifdef MOZ_WEBRTC // If given, this is the RTP timestamp from the last packet for the frame. if (selected->mRtpTimestamp) {
aMd.mRtpTimestamp.Construct(*selected->mRtpTimestamp);
}
// For remote sources, the capture and receive time are represented as WebRTC // timestamps relative to an origin that is specific to the WebRTC session. bool hasCaptureTimeNtp = selected->mWebrtcCaptureTime.is<int64_t>(); bool hasReceiveTimeReal = selected->mWebrtcReceiveTime.isSome(); if (mSelectedVideoStreamTrack && (hasCaptureTimeNtp || hasReceiveTimeReal)) { if (constauto* timestampMaker =
mSelectedVideoStreamTrack->GetTimestampMaker()) { if (hasCaptureTimeNtp) {
aMd.mCaptureTime.Construct(
RTCStatsTimestamp::FromNtp(
*timestampMaker,
webrtc::Timestamp::Micros(
selected->mWebrtcCaptureTime.as<int64_t>()))
.ToDom());
} if (hasReceiveTimeReal) {
aMd.mReceiveTime.Construct(
RTCStatsTimestamp::FromRealtime(
*timestampMaker,
webrtc::Timestamp::Micros(*selected->mWebrtcReceiveTime))
.ToDom());
}
}
}
// Otherwise, the capture time may be a high resolution timestamp from the // camera pipeline indicating when the sample was captured. if (selected->mWebrtcCaptureTime.is<TimeStamp>()) { if (nsPIDOMWindowInner* win = OwnerDoc()->GetInnerWindow()) { if (Performance* perf = win->GetPerformance()) {
aMd.mCaptureTime.Construct(perf->TimeStampToDOMHighResForRendering(
selected->mWebrtcCaptureTime.as<TimeStamp>()));
}
}
} #endif
// Presented frames is a bit of a misnomer from a rendering perspective, // because we still need to advance regardless of composition. Video elements // that are outside of the DOM, or are not visible, still advance the video in // the background, and presumably the caller still needs some way to know how // many frames we have advanced.
aMd.mPresentedFrames = selected->mFrameID;
void HTMLVideoElement::FinishedVideoFrameRequestCallbacks() { // After we have executed the rVFC and rAF callbacks, we need to check whether // or not we have scheduled more. If we did not, then we need to notify the // decoder, because it may be the only thing keeping the decoder fully active. if (!HasPendingCallbacks()) {
NotifyDecoderActivityChanges();
}
}
// NOTE(review): the following trailing text is German web-page boilerplate
// from the page this file was extracted from; it is not part of the source.
// Converted to a comment (with English translation) so the file stays
// syntactically valid — remove entirely once confirmed:
// "The information on this web page was compiled carefully to the best of
//  our knowledge. However, neither completeness, nor correctness, nor
//  quality of the provided information is guaranteed.
//  Remark: the colour syntax highlighting is still experimental."