Anforderungen  |   Konzepte  |   Entwurf  |   Entwicklung  |   Qualitätssicherung  |   Lebenszyklus  |   Steuerung
 
 
 
 


Quelle  MediaTrackGraph.cpp   Sprache: C++

 
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */


#include "MediaTrackGraphImpl.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Unused.h"

#include "AudioSegment.h"
#include "CrossGraphPort.h"
#include "VideoSegment.h"
#include "nsContentUtils.h"
#include "nsGlobalWindowInner.h"
#include "nsPrintfCString.h"
#include "nsServiceManagerUtils.h"
#include "prerror.h"
#include "mozilla/Logging.h"
#include "mozilla/Attributes.h"
#include "ForwardedInputTrack.h"
#include "ImageContainer.h"
#include "AudioCaptureTrack.h"
#include "AudioDeviceInfo.h"
#include "AudioNodeTrack.h"
#include "AudioNodeExternalInputTrack.h"
#if defined(MOZ_WEBRTC)
#  include "MediaEngineWebRTCAudio.h"
#endif  // MOZ_WEBRTC
#include "MediaTrackListener.h"
#include "mozilla/dom/BaseAudioContextBinding.h"
#include "mozilla/dom/Document.h"
#include "mozilla/dom/WorkletThread.h"
#include "mozilla/media/MediaUtils.h"
#include <algorithm>
#include "GeckoProfiler.h"
#include "VideoFrameContainer.h"
#include "mozilla/AbstractThread.h"
#include "mozilla/StaticPrefs_dom.h"
#include "mozilla/StaticPrefs_media.h"
#include "transport/runnable_utils.h"
#include "VideoUtils.h"
#include "GraphRunner.h"
#include "Tracing.h"
#include "UnderrunHandler.h"
#include "mozilla/CycleCollectedJSRuntime.h"
#include "mozilla/Preferences.h"

#include "webaudio/blink/DenormalDisabler.h"
#include "webaudio/blink/HRTFDatabaseLoader.h"

using namespace mozilla::layers;
using namespace mozilla::dom;
using namespace mozilla::gfx;
using namespace mozilla::media;

namespace mozilla {

using AudioDeviceID = CubebUtils::AudioDeviceID;
using IsInShutdown = MediaTrack::IsInShutdown;

LazyLogModule gMediaTrackGraphLog("MediaTrackGraph");
#ifdef LOG
#  undef LOG
#endif  // LOG
#define LOG(type, msg) MOZ_LOG(gMediaTrackGraphLog, type, msg)

// Returns the native (full-duplex) input track, or nullptr if none is
// registered.
NativeInputTrack* DeviceInputTrackManager::GetNativeInputTrack() {
  return mNativeInputTrack.get();
}

// Looks up the input track opened for the given device ID, checking the
// native track first, then the non-native list. Returns nullptr when no
// track uses that device.
DeviceInputTrack* DeviceInputTrackManager::GetDeviceInputTrack(
    CubebUtils::AudioDeviceID aID) {
  if (mNativeInputTrack && mNativeInputTrack->mDeviceId == aID) {
    return mNativeInputTrack.get();
  }
  for (size_t i = 0; i < mNonNativeInputTracks.Length(); ++i) {
    const RefPtr<NonNativeInputTrack>& candidate = mNonNativeInputTracks[i];
    if (candidate->mDeviceId == aID) {
      return candidate.get();
    }
  }
  return nullptr;
}

// Returns the first registered non-native input track, or nullptr when the
// list is empty.
NonNativeInputTrack* DeviceInputTrackManager::GetFirstNonNativeInputTrack() {
  return mNonNativeInputTracks.IsEmpty() ? nullptr
                                         : mNonNativeInputTracks[0].get();
}

void DeviceInputTrackManager::Add(DeviceInputTrack* aTrack) {
  if (NativeInputTrack* native = aTrack->AsNativeInputTrack()) {
    MOZ_ASSERT(!mNativeInputTrack);
    mNativeInputTrack = native;
  } else {
    NonNativeInputTrack* nonNative = aTrack->AsNonNativeInputTrack();
    MOZ_ASSERT(nonNative);
    struct DeviceTrackComparator {
     public:
      bool Equals(const RefPtr<NonNativeInputTrack>& aTrack,
                  CubebUtils::AudioDeviceID aDeviceId) const {
        return aTrack->mDeviceId == aDeviceId;
      }
    };
    MOZ_ASSERT(!mNonNativeInputTracks.Contains(aTrack->mDeviceId,
                                               DeviceTrackComparator()));
    mNonNativeInputTracks.AppendElement(nonNative);
  }
}

void DeviceInputTrackManager::Remove(DeviceInputTrack* aTrack) {
  if (aTrack->AsNativeInputTrack()) {
    MOZ_ASSERT(mNativeInputTrack);
    MOZ_ASSERT(mNativeInputTrack.get() == aTrack->AsNativeInputTrack());
    mNativeInputTrack = nullptr;
  } else {
    NonNativeInputTrack* nonNative = aTrack->AsNonNativeInputTrack();
    MOZ_ASSERT(nonNative);
    DebugOnly<bool> removed = mNonNativeInputTracks.RemoveElement(nonNative);
    MOZ_ASSERT(removed);
  }
}

/**
 * A hash table containing the graph instances, one per Window ID,
 * sample rate, and device ID combination.
 */


struct MediaTrackGraphImpl::Lookup final {
  // Hash over all three identifying fields; must agree with
  // GraphHasher::match below.
  HashNumber Hash() const {
    return HashGeneric(mWindowID, mSampleRate, mOutputDeviceID);
  }
  const uint64_t mWindowID;
  const TrackRate mSampleRate;
  const CubebUtils::AudioDeviceID mOutputDeviceID;
};

// Implicit to support GraphHashSet.lookup(*graph): converts a graph into the
// (window ID, sample rate, output device) triple that identifies it.
MOZ_IMPLICIT MediaTrackGraphImpl::operator MediaTrackGraphImpl::Lookup() const {
  return {mWindowID, mSampleRate, PrimaryOutputDeviceID()};
}

namespace {
struct GraphHasher {  // for HashSet
  using Lookup = const MediaTrackGraphImpl::Lookup;

  static HashNumber hash(const Lookup& aLookup) { return aLookup.Hash(); }

  // Equality must compare the same fields Lookup::Hash() hashes.
  static bool match(const MediaTrackGraphImpl* aGraph, const Lookup& aLookup) {
    return aGraph->mWindowID == aLookup.mWindowID &&
           aGraph->GraphRate() == aLookup.mSampleRate &&
           aGraph->PrimaryOutputDeviceID() == aLookup.mOutputDeviceID;
  }
};

// The weak reference to the graph is removed when its last track is removed.
using GraphHashSet =
    HashSet<MediaTrackGraphImpl*, GraphHasher, InfallibleAllocPolicy>;
// Main-thread-only registry of live graphs, keyed by window/rate/device.
GraphHashSet* Graphs() {
  MOZ_ASSERT(NS_IsMainThread());
  static GraphHashSet sGraphs(4);  // 4 is minimum HashSet capacity
  return &sGraphs;
}
}  // anonymous namespace

static void ApplyTrackDisabling(DisabledTrackMode aDisabledMode,
                                MediaSegment* aSegment,
                                MediaSegment* aRawSegment) {
  if (aDisabledMode == DisabledTrackMode::ENABLED) {
    return;
  }
  if (aDisabledMode == DisabledTrackMode::SILENCE_BLACK) {
    aSegment->ReplaceWithDisabled();
    if (aRawSegment) {
      aRawSegment->ReplaceWithDisabled();
    }
  } else if (aDisabledMode == DisabledTrackMode::SILENCE_FREEZE) {
    aSegment->ReplaceWithNull();
    if (aRawSegment) {
      aRawSegment->ReplaceWithNull();
    }
  } else {
    MOZ_CRASH("Unsupported mode");
  }
}

// Destructor: by this point every track must already have been destroyed via
// main-thread messages; the graph owns no tracks when it dies.
MediaTrackGraphImpl::~MediaTrackGraphImpl() {
  MOZ_ASSERT(mTracks.IsEmpty() && mSuspendedTracks.IsEmpty(),
             "All tracks should have been destroyed by messages from the main "
             "thread");
  // Fixed: the format string and `this` were fused (missing ", "), which
  // does not compile.
  LOG(LogLevel::Debug, ("MediaTrackGraph %p destroyed", this));
  LOG(LogLevel::Debug, ("MediaTrackGraphImpl::~MediaTrackGraphImpl"));
}

void MediaTrackGraphImpl::AddTrackGraphThread(MediaTrack* aTrack) {
  MOZ_ASSERT(OnGraphThreadOrNotRunning());
  aTrack->mStartTime = mProcessedTime;

  if (aTrack->IsSuspended()) {
    mSuspendedTracks.AppendElement(aTrack);
    LOG(LogLevel::Debug,
        ("%p: Adding media track %p, in the suspended track array"this,
         aTrack));
  } else {
    mTracks.AppendElement(aTrack);
    LOG(LogLevel::Debug, ("%p: Adding media track %p, count %zu"this, aTrack,
                          mTracks.Length()));
  }

  SetTrackOrderDirty();
}

void MediaTrackGraphImpl::RemoveTrackGraphThread(MediaTrack* aTrack) {
  MOZ_ASSERT(OnGraphThreadOrNotRunning());
  // Remove references in mTrackUpdates before we allow aTrack to die.
  // Pending updates are not needed (since the main thread has already given
  // up the track) so we will just drop them.
  {
    MonitorAutoLock lock(mMonitor);
    for (uint32_t i = 0; i < mTrackUpdates.Length(); ++i) {
      if (mTrackUpdates[i].mTrack == aTrack) {
        mTrackUpdates[i].mTrack = nullptr;
      }
    }
  }

  // Ensure that mFirstCycleBreaker is updated when necessary.
  SetTrackOrderDirty();

  UnregisterAllAudioOutputs(aTrack);

  if (aTrack->IsSuspended()) {
    mSuspendedTracks.RemoveElement(aTrack);
  } else {
    mTracks.RemoveElement(aTrack);
  }

  LOG(LogLevel::Debug, ("%p: Removed media track %p, count %zu"this, aTrack,
                        mTracks.Length()));

  NS_RELEASE(aTrack);  // probably destroying it
}

TrackTime MediaTrackGraphImpl::GraphTimeToTrackTimeWithBlocking(
    const MediaTrack* aTrack, GraphTime aTime) const {
  MOZ_ASSERT(
      aTime <= mStateComputedTime,
      "Don't ask about times where we haven't made blocking decisions yet");
  return std::max<TrackTime>(
      0, std::min(aTime, aTrack->mStartBlocking) - aTrack->mStartTime);
}

void MediaTrackGraphImpl::UpdateCurrentTimeForTracks(
    GraphTime aPrevCurrentTime) {
  MOZ_ASSERT(OnGraphThread());
  for (MediaTrack* track : AllTracks()) {
    // Shouldn't have already notified of ended *and* have output!
    MOZ_ASSERT_IF(track->mStartBlocking > aPrevCurrentTime,
                  !track->mNotifiedEnded);

    // Calculate blocked time and fire Blocked/Unblocked events
    GraphTime blockedTime = mStateComputedTime - track->mStartBlocking;
    NS_ASSERTION(blockedTime >= 0, "Error in blocking time");
    track->AdvanceTimeVaryingValuesToCurrentTime(mStateComputedTime,
                                                 blockedTime);
    LOG(LogLevel::Verbose,
        ("%p: MediaTrack %p bufferStartTime=%f blockedTime=%f"this, track,
         MediaTimeToSeconds(track->mStartTime),
         MediaTimeToSeconds(blockedTime)));
    track->mStartBlocking = mStateComputedTime;

    TrackTime trackCurrentTime =
        track->GraphTimeToTrackTime(mStateComputedTime);
    if (track->mEnded) {
      MOZ_ASSERT(track->GetEnd() <= trackCurrentTime);
      if (!track->mNotifiedEnded) {
        // Playout of this track ended and listeners have not been notified.
        track->mNotifiedEnded = true;
        SetTrackOrderDirty();
        for (const auto& listener : track->mTrackListeners) {
          listener->NotifyOutput(this, track->GetEnd());
          listener->NotifyEnded(this);
        }
      }
    } else {
      for (const auto& listener : track->mTrackListeners) {
        listener->NotifyOutput(this, trackCurrentTime);
      }
    }
  }
}

// Walks the chunks of aSegment that overlap [aStart, aEnd) on aTrack's
// timeline and notifies the track's listeners whenever the principal handle
// changes from the last one recorded on the segment.
template <typename C, typename Chunk>
void MediaTrackGraphImpl::ProcessChunkMetadataForInterval(MediaTrack* aTrack,
                                                          C& aSegment,
                                                          TrackTime aStart,
                                                          TrackTime aEnd) {
  MOZ_ASSERT(OnGraphThreadOrNotRunning());
  MOZ_ASSERT(aTrack);

  // offset tracks the end of the current chunk; chunks wholly before aStart
  // or after aEnd are skipped.
  TrackTime offset = 0;
  for (typename C::ConstChunkIterator chunk(aSegment); !chunk.IsEnded();
       chunk.Next()) {
    if (offset >= aEnd) {
      break;
    }
    offset += chunk->GetDuration();
    if (chunk->IsNull() || offset < aStart) {
      continue;
    }
    const PrincipalHandle& principalHandle = chunk->GetPrincipalHandle();
    if (principalHandle != aSegment.GetLastPrincipalHandle()) {
      // Record the new principal so we only notify on changes.
      aSegment.SetLastPrincipalHandle(principalHandle);
      LOG(LogLevel::Debug,
          ("%p: MediaTrack %p, principalHandle "
           "changed in %sChunk with duration %lld",
           this, aTrack,
           aSegment.GetType() == MediaSegment::AUDIO ? "Audio" : "Video",
           (long long)chunk->GetDuration()));
      for (const auto& listener : aTrack->mTrackListeners) {
        listener->NotifyPrincipalHandleChanged(this, principalHandle);
      }
    }
  }
}

void MediaTrackGraphImpl::ProcessChunkMetadata(GraphTime aPrevCurrentTime) {
  MOZ_ASSERT(OnGraphThreadOrNotRunning());
  for (MediaTrack* track : AllTracks()) {
    TrackTime iterationStart = track->GraphTimeToTrackTime(aPrevCurrentTime);
    TrackTime iterationEnd = track->GraphTimeToTrackTime(mProcessedTime);
    if (!track->mSegment) {
      continue;
    }
    if (track->mType == MediaSegment::AUDIO) {
      ProcessChunkMetadataForInterval<AudioSegment, AudioChunk>(
          track, *track->GetData<AudioSegment>(), iterationStart, iterationEnd);
    } else if (track->mType == MediaSegment::VIDEO) {
      ProcessChunkMetadataForInterval<VideoSegment, VideoChunk>(
          track, *track->GetData<VideoSegment>(), iterationStart, iterationEnd);
    } else {
      MOZ_CRASH("Unknown track type");
    }
  }
}

// Returns the graph time up to which aTrack can run without underrunning,
// capped at aEndBlockingDecisions. Used when making blocking decisions.
GraphTime MediaTrackGraphImpl::WillUnderrun(MediaTrack* aTrack,
                                            GraphTime aEndBlockingDecisions) {
  // Ended tracks can't underrun. ProcessedMediaTracks also can't cause
  // underrun currently, since we'll always be able to produce data for them
  // unless they block on some other track.
  if (aTrack->mEnded || aTrack->AsProcessedTrack()) {
    return aEndBlockingDecisions;
  }
  // This track isn't ended or suspended. We don't need to call
  // TrackTimeToGraphTime since an underrun is the only thing that can block
  // it.
  GraphTime bufferEnd = aTrack->GetEnd() + aTrack->mStartTime;
#ifdef DEBUG
  if (bufferEnd < mProcessedTime) {
    LOG(LogLevel::Error, ("%p: MediaTrack %p underrun, "
                          "bufferEnd %f < mProcessedTime %f (%" PRId64
                          " < %" PRId64 "), TrackTime %" PRId64,
                          this, aTrack, MediaTimeToSeconds(bufferEnd),
                          MediaTimeToSeconds(mProcessedTime), bufferEnd,
                          mProcessedTime, aTrack->GetEnd()));
    NS_ASSERTION(bufferEnd >= mProcessedTime, "Buffer underran");
  }
#endif
  return std::min(bufferEnd, aEndBlockingDecisions);
}

namespace {
// Value of mCycleMarker for unvisited tracks in cycle detection.
const uint32_t NOT_VISITED = UINT32_MAX;
// Value of mCycleMarker for ordered tracks in muted cycles.
// Must be > 0 (the marker for ordered, non-cycle tracks) and less than any
// stack marker handed out during the DFS (those descend from NOT_VISITED-1).
const uint32_t IN_MUTED_CYCLE = 1;
}  // namespace

bool MediaTrackGraphImpl::AudioTrackPresent() {
  MOZ_ASSERT(OnGraphThreadOrNotRunning());

  bool audioTrackPresent = false;
  for (MediaTrack* track : mTracks) {
    if (track->AsAudioNodeTrack()) {
      audioTrackPresent = true;
      break;
    }

    if (track->mType == MediaSegment::AUDIO && !track->mNotifiedEnded) {
      audioTrackPresent = true;
      break;
    }
  }

  // We may not have audio input device when we only have AudioNodeTracks. But
  // if audioTrackPresent is false, we must have no input device.
  MOZ_DIAGNOSTIC_ASSERT_IF(
      !audioTrackPresent,
      !mDeviceInputTrackManagerGraphThread.GetNativeInputTrack());

  return audioTrackPresent;
}

void MediaTrackGraphImpl::CheckDriver() {
  MOZ_ASSERT(OnGraphThread());
  // An offline graph has only one driver.
  // Otherwise, if a switch is already pending, let that happen.
  if (!mRealtime || Switching()) {
    return;
  }

  AudioCallbackDriver* audioCallbackDriver =
      CurrentDriver()->AsAudioCallbackDriver();
  if (audioCallbackDriver && !audioCallbackDriver->OnFallback()) {
    for (PendingResumeOperation& op : mPendingResumeOperations) {
      op.Apply(this);
    }
    mPendingResumeOperations.Clear();
  }

  // Note that this looks for any audio tracks, input or output, and switches
  // to a SystemClockDriver if there are none active or no resume operations
  // to make any active.
  bool needAudioCallbackDriver =
      !mPendingResumeOperations.IsEmpty() || AudioTrackPresent();
  if (!needAudioCallbackDriver) {
    if (audioCallbackDriver && audioCallbackDriver->IsStarted()) {
      SwitchAtNextIteration(
          new SystemClockDriver(this, CurrentDriver(), mSampleRate));
    }
    return;
  }

  NativeInputTrack* native =
      mDeviceInputTrackManagerGraphThread.GetNativeInputTrack();
  CubebUtils::AudioDeviceID inputDevice = native ? native->mDeviceId : nullptr;
  uint32_t inputChannelCount = AudioInputChannelCount(inputDevice);
  AudioInputType inputPreference = AudioInputDevicePreference(inputDevice);
  Maybe<AudioInputProcessingParamsRequest> processingRequest =
      ToMaybeRef(native).map([](auto& native) {
        return native.UpdateRequestedProcessingParams();
      });

  uint32_t primaryOutputChannelCount = PrimaryOutputChannelCount();
  if (!audioCallbackDriver) {
    if (primaryOutputChannelCount > 0) {
      AudioCallbackDriver* driver = new AudioCallbackDriver(
          this, CurrentDriver(), mSampleRate, primaryOutputChannelCount,
          inputChannelCount, PrimaryOutputDeviceID(), inputDevice,
          inputPreference, processingRequest);
      SwitchAtNextIteration(driver);
    }
    return;
  }

  bool needInputProcessingParamUpdate =
      processingRequest &&
      processingRequest->mGeneration !=
          audioCallbackDriver->RequestedInputProcessingParams().mGeneration;

  // Check if this graph should switch to a different number of output channels.
  // Generally, a driver switch is explicitly made by an event (e.g., setting
  // the AudioDestinationNode channelCount), but if an HTMLMediaElement is
  // directly playing back via another HTMLMediaElement, the number of channels
  // of the media determines how many channels to output, and it can change
  // dynamically.
  if (primaryOutputChannelCount != audioCallbackDriver->OutputChannelCount()) {
    if (needInputProcessingParamUpdate) {
      needInputProcessingParamUpdate = false;
    }
    AudioCallbackDriver* driver = new AudioCallbackDriver(
        this, CurrentDriver(), mSampleRate, primaryOutputChannelCount,
        inputChannelCount, PrimaryOutputDeviceID(), inputDevice,
        inputPreference, processingRequest);
    SwitchAtNextIteration(driver);
  }

  if (needInputProcessingParamUpdate) {
    needInputProcessingParamUpdate = false;
    LOG(LogLevel::Debug,
        ("%p: Setting on the fly requested processing params %s (Gen %d)"this,
         CubebUtils::ProcessingParamsToString(processingRequest->mParams).get(),
         processingRequest->mGeneration));
    audioCallbackDriver->RequestInputProcessingParams(*processingRequest);
  }
}

// Re-sorts mTracks into processing order: source tracks first, then processed
// tracks after their inputs, with cycle-breaking DelayNodes collected at the
// tail (from mFirstCycleBreaker on). No-op unless the order is dirty.
void MediaTrackGraphImpl::UpdateTrackOrder() {
  if (!mTrackOrderDirty) {
    return;
  }

  mTrackOrderDirty = false;

  // The algorithm for finding cycles is based on Tim Leslie's iterative
  // implementation [1][2] of Pearce's variant [3] of Tarjan's strongly
  // connected components (SCC) algorithm.  There are variations (a) to
  // distinguish whether tracks in SCCs of size 1 are in a cycle and (b) to
  // re-run the algorithm over SCCs with breaks at DelayNodes.
  //
  // [1] http://www.timl.id.au/?p=327
  // [2]
  // https://github.com/scipy/scipy/blob/e2c502fca/scipy/sparse/csgraph/_traversal.pyx#L582
  // [3] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.1707
  //
  // There are two stacks.  One for the depth-first search (DFS),
  mozilla::LinkedList<MediaTrack> dfsStack;
  // and another for tracks popped from the DFS stack, but still being
  // considered as part of SCCs involving tracks on the stack.
  mozilla::LinkedList<MediaTrack> sccStack;

  // An index into mTracks for the next track found with no unsatisfied
  // upstream dependencies.
  uint32_t orderedTrackCount = 0;

  for (uint32_t i = 0; i < mTracks.Length(); ++i) {
    MediaTrack* t = mTracks[i];
    ProcessedMediaTrack* pt = t->AsProcessedTrack();
    if (pt) {
      // The dfsStack initially contains a list of all processed tracks in
      // unchanged order.
      dfsStack.insertBack(t);
      pt->mCycleMarker = NOT_VISITED;
    } else {
      // SourceMediaTracks have no inputs and so can be ordered now.
      mTracks[orderedTrackCount] = t;
      ++orderedTrackCount;
    }
  }

  // mNextStackMarker corresponds to "index" in Tarjan's algorithm.  It is a
  // counter to label mCycleMarker on the next visited track in the DFS
  // uniquely in the set of visited tracks that are still being considered.
  //
  // In this implementation, the counter descends so that the values are
  // strictly greater than the values that mCycleMarker takes when the track
  // has been ordered (0 or IN_MUTED_CYCLE).
  //
  // Each new track labelled, as the DFS searches upstream, receives a value
  // less than those used for all other tracks being considered.
  uint32_t nextStackMarker = NOT_VISITED - 1;
  // Reset list of DelayNodes in cycles stored at the tail of mTracks.
  mFirstCycleBreaker = mTracks.Length();

  // Rearrange dfsStack order as required to DFS upstream and pop tracks
  // in processing order to place in mTracks.
  while (auto pt = static_cast<ProcessedMediaTrack*>(dfsStack.getFirst())) {
    const auto& inputs = pt->mInputs;
    MOZ_ASSERT(pt->AsProcessedTrack());
    if (pt->mCycleMarker == NOT_VISITED) {
      // Record the position on the visited stack, so that any searches
      // finding this track again know how much of the stack is in the cycle.
      pt->mCycleMarker = nextStackMarker;
      --nextStackMarker;
      // Not-visited input tracks should be processed first.
      // SourceMediaTracks have already been ordered.
      for (uint32_t i = inputs.Length(); i--;) {
        if (inputs[i]->GetSource()->IsSuspended()) {
          continue;
        }
        auto input = inputs[i]->GetSource()->AsProcessedTrack();
        if (input && input->mCycleMarker == NOT_VISITED) {
          // It can be that this track has an input which is from a suspended
          // AudioContext.
          if (input->isInList()) {
            input->remove();
            dfsStack.insertFront(input);
          }
        }
      }
      continue;
    }

    // Returning from DFS.  Pop from dfsStack.
    pt->remove();

    // cycleStackMarker keeps track of the highest marker value on any
    // upstream track, if any, found receiving input, directly or indirectly,
    // from the visited stack (and so from |ps|, making a cycle).  In a
    // variation from Tarjan's SCC algorithm, this does not include |ps|
    // unless it is part of the cycle.
    uint32_t cycleStackMarker = 0;
    for (uint32_t i = inputs.Length(); i--;) {
      if (inputs[i]->GetSource()->IsSuspended()) {
        continue;
      }
      auto input = inputs[i]->GetSource()->AsProcessedTrack();
      if (input) {
        cycleStackMarker = std::max(cycleStackMarker, input->mCycleMarker);
      }
    }

    if (cycleStackMarker <= IN_MUTED_CYCLE) {
      // All inputs have been ordered and their stack markers have been removed.
      // This track is not part of a cycle.  It can be processed next.
      pt->mCycleMarker = 0;
      mTracks[orderedTrackCount] = pt;
      ++orderedTrackCount;
      continue;
    }

    // A cycle has been found.  Record this track for ordering when all
    // tracks in this SCC have been popped from the DFS stack.
    sccStack.insertFront(pt);

    if (cycleStackMarker > pt->mCycleMarker) {
      // Cycles have been found that involve tracks that remain on the stack.
      // Leave mCycleMarker indicating the most downstream (last) track on
      // the stack known to be part of this SCC.  In this way, any searches on
      // other paths that find |ps| will know (without having to traverse from
      // this track again) that they are part of this SCC (i.e. part of an
      // intersecting cycle).
      pt->mCycleMarker = cycleStackMarker;
      continue;
    }

    // |pit| is the root of an SCC involving no other tracks on dfsStack, the
    // complete SCC has been recorded, and tracks in this SCC are part of at
    // least one cycle.
    MOZ_ASSERT(cycleStackMarker == pt->mCycleMarker);
    // If there are DelayNodes in this SCC, then they may break the cycles.
    bool haveDelayNode = false;
    auto next = sccStack.getFirst();
    // Tracks in this SCC are identified by mCycleMarker <= cycleStackMarker.
    // (There may be other tracks later in sccStack from other incompletely
    // searched SCCs, involving tracks still on dfsStack.)
    //
    // DelayNodes in cycles must behave differently from those not in cycles,
    // so all DelayNodes in the SCC must be identified.
    while (next && static_cast<ProcessedMediaTrack*>(next)->mCycleMarker <=
                       cycleStackMarker) {
      auto nt = next->AsAudioNodeTrack();
      // Get next before perhaps removing from list below.
      next = next->getNext();
      if (nt && nt->Engine()->AsDelayNodeEngine()) {
        haveDelayNode = true;
        // DelayNodes break cycles by producing their output in a
        // preprocessing phase; they do not need to be ordered before their
        // consumers.  Order them at the tail of mTracks so that they can be
        // handled specially.  Do so now, so that DFS ignores them.
        nt->remove();
        nt->mCycleMarker = 0;
        --mFirstCycleBreaker;
        mTracks[mFirstCycleBreaker] = nt;
      }
    }
    auto after_scc = next;
    while ((next = sccStack.getFirst()) != after_scc) {
      next->remove();
      auto removed = static_cast<ProcessedMediaTrack*>(next);
      if (haveDelayNode) {
        // Return tracks to the DFS stack again (to order and detect cycles
        // without delayNodes).  Any of these tracks that are still inputs
        // for tracks on the visited stack must be returned to the front of
        // the stack to be ordered before their dependents.  We know that none
        // of these tracks need input from tracks on the visited stack, so
        // they can all be searched and ordered before the current stack head
        // is popped.
        removed->mCycleMarker = NOT_VISITED;
        dfsStack.insertFront(removed);
      } else {
        // Tracks in cycles without any DelayNodes must be muted, and so do
        // not need input and can be ordered now.  They must be ordered before
        // their consumers so that their muted output is available.
        removed->mCycleMarker = IN_MUTED_CYCLE;
        mTracks[orderedTrackCount] = removed;
        ++orderedTrackCount;
      }
    }
  }

  MOZ_ASSERT(orderedTrackCount == mFirstCycleBreaker);
}

TrackTime MediaTrackGraphImpl::PlayAudio(const TrackAndVolume& aOutput,
                                         GraphTime aPlayedTime,
                                         uint32_t aOutputChannelCount) {
  MOZ_ASSERT(OnGraphThread());
  MOZ_ASSERT(mRealtime, "Should only attempt to play audio in realtime mode");

  TrackTime ticksWritten = 0;

  ticksWritten = 0;
  MediaTrack* track = aOutput.mTrack;
  AudioSegment* audio = track->GetData<AudioSegment>();
  AudioSegment output;

  TrackTime offset = track->GraphTimeToTrackTime(aPlayedTime);

  // We don't update Track->mTracksStartTime here to account for time spent
  // blocked. Instead, we'll update it in UpdateCurrentTimeForTracks after
  // the blocked period has completed. But we do need to make sure we play
  // from the right offsets in the track buffer, even if we've already
  // written silence for some amount of blocked time after the current time.
  GraphTime t = aPlayedTime;
  while (t < mStateComputedTime) {
    bool blocked = t >= track->mStartBlocking;
    GraphTime end = blocked ? mStateComputedTime : track->mStartBlocking;
    NS_ASSERTION(end <= mStateComputedTime, "mStartBlocking is wrong!");

    // Check how many ticks of sound we can provide if we are blocked some
    // time in the middle of this cycle.
    TrackTime toWrite = end - t;

    if (blocked) {
      output.InsertNullDataAtStart(toWrite);
      ticksWritten += toWrite;
      LOG(LogLevel::Verbose,
          ("%p: MediaTrack %p writing %" PRId64 " blocking-silence samples for "
           "%f to %f (%" PRId64 " to %" PRId64 ")",
           this, track, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
           offset, offset + toWrite));
    } else {
      TrackTime endTicksNeeded = offset + toWrite;
      TrackTime endTicksAvailable = audio->GetDuration();

      if (endTicksNeeded <= endTicksAvailable) {
        LOG(LogLevel::Verbose,
            ("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f "
             "(samples %" PRId64 " to %" PRId64 ")",
             this, track, toWrite, MediaTimeToSeconds(t),
             MediaTimeToSeconds(end), offset, endTicksNeeded));
        output.AppendSlice(*audio, offset, endTicksNeeded);
        ticksWritten += toWrite;
        offset = endTicksNeeded;
      } else {
        // MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not
        // ended."); If we are at the end of the track, maybe write the
        // remaining samples, and pad with/output silence.
        if (endTicksNeeded > endTicksAvailable && offset < endTicksAvailable) {
          output.AppendSlice(*audio, offset, endTicksAvailable);

          LOG(LogLevel::Verbose,
              ("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f "
               "(samples %" PRId64 " to %" PRId64 ")",
               this, track, toWrite, MediaTimeToSeconds(t),
               MediaTimeToSeconds(end), offset, endTicksNeeded));
          uint32_t available = endTicksAvailable - offset;
          ticksWritten += available;
          toWrite -= available;
          offset = endTicksAvailable;
        }
        output.AppendNullData(toWrite);
        LOG(LogLevel::Verbose,
            ("%p MediaTrack %p writing %" PRId64 " padding slsamples for %f to "
             "%f (samples %" PRId64 " to %" PRId64 ")",
             this, track, toWrite, MediaTimeToSeconds(t),
             MediaTimeToSeconds(end), offset, endTicksNeeded));
        ticksWritten += toWrite;
      }
      output.ApplyVolume(mGlobalVolume * aOutput.mVolume);
    }
    t = end;

    output.Mix(mMixer, aOutputChannelCount, mSampleRate);
  }
  return ticksWritten;
}

// Main-thread accessor: returns the input track open for aID, if any.
DeviceInputTrack* MediaTrackGraph::GetDeviceInputTrackMainThread(
    CubebUtils::AudioDeviceID aID) {
  MOZ_ASSERT(NS_IsMainThread());
  return static_cast<MediaTrackGraphImpl*>(this)
      ->mDeviceInputTrackManagerMainThread.GetDeviceInputTrack(aID);
}

// Main-thread accessor: returns the native input track, if any.
NativeInputTrack* MediaTrackGraph::GetNativeInputTrackMainThread() {
  MOZ_ASSERT(NS_IsMainThread());
  return static_cast<MediaTrackGraphImpl*>(this)
      ->mDeviceInputTrackManagerMainThread.GetNativeInputTrack();
}

void MediaTrackGraphImpl::OpenAudioInputImpl(DeviceInputTrack* aTrack) {
  MOZ_ASSERT(OnGraphThread());
  LOG(LogLevel::Debug,
      ("%p OpenAudioInputImpl: device %p"this, aTrack->mDeviceId));

  mDeviceInputTrackManagerGraphThread.Add(aTrack);

  if (aTrack->AsNativeInputTrack()) {
    // Switch Drivers since we're adding input (to input-only or full-duplex)
    AudioCallbackDriver* driver = new AudioCallbackDriver(
        this, CurrentDriver(), mSampleRate, PrimaryOutputChannelCount(),
        AudioInputChannelCount(aTrack->mDeviceId), PrimaryOutputDeviceID(),
        aTrack->mDeviceId, AudioInputDevicePreference(aTrack->mDeviceId),
        Some(aTrack->UpdateRequestedProcessingParams()));
    LOG(LogLevel::Debug,
        ("%p OpenAudioInputImpl: starting new AudioCallbackDriver(input) %p",
         this, driver));
    SwitchAtNextIteration(driver);
  } else {
    NonNativeInputTrack* nonNative = aTrack->AsNonNativeInputTrack();
    MOZ_ASSERT(nonNative);
    // Start non-native input right away.
    nonNative->StartAudio(MakeRefPtr<AudioInputSource>(
        MakeRefPtr<AudioInputSourceListener>(nonNative),
        nonNative->GenerateSourceId(), nonNative->mDeviceId,
        AudioInputChannelCount(nonNative->mDeviceId),
        AudioInputDevicePreference(nonNative->mDeviceId) ==
            AudioInputType::Voice,
        nonNative->mPrincipalHandle, nonNative->mSampleRate, GraphRate()));
  }
}

void MediaTrackGraphImpl::OpenAudioInput(DeviceInputTrack* aTrack) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aTrack);

  LOG(LogLevel::Debug, ("%p OpenInput: DeviceInputTrack %p for device %p"this,
                        aTrack, aTrack->mDeviceId));

  class Message : public ControlMessage {
   public:
    Message(MediaTrackGraphImpl* aGraph, DeviceInputTrack* aInputTrack)
        : ControlMessage(nullptr), mGraph(aGraph), mInputTrack(aInputTrack) {}
    void Run() override {
      TRACE("MTG::OpenAudioInputImpl ControlMessage");
      mGraph->OpenAudioInputImpl(mInputTrack);
    }
    MediaTrackGraphImpl* mGraph;
    DeviceInputTrack* mInputTrack;
  };

  mDeviceInputTrackManagerMainThread.Add(aTrack);

  this->AppendMessage(MakeUnique<Message>(this, aTrack));
}

void MediaTrackGraphImpl::CloseAudioInputImpl(DeviceInputTrack* aTrack) {
  MOZ_ASSERT(OnGraphThread());

  LOG(LogLevel::Debug,
      ("%p CloseAudioInputImpl: device %p"this, aTrack->mDeviceId));

  if (NonNativeInputTrack* nonNative = aTrack->AsNonNativeInputTrack()) {
    nonNative->StopAudio();
    mDeviceInputTrackManagerGraphThread.Remove(aTrack);
    return;
  }

  MOZ_ASSERT(aTrack->AsNativeInputTrack());

  mDeviceInputTrackManagerGraphThread.Remove(aTrack);

  // Switch Drivers since we're adding or removing an input (to nothing/system
  // or output only)
  bool audioTrackPresent = AudioTrackPresent();

  GraphDriver* driver;
  if (audioTrackPresent) {
    // We still have audio output
    LOG(LogLevel::Debug,
        ("%p: CloseInput: output present (AudioCallback)"this));

    driver = new AudioCallbackDriver(
        this, CurrentDriver(), mSampleRate, PrimaryOutputChannelCount(),
        AudioInputChannelCount(aTrack->mDeviceId), PrimaryOutputDeviceID(),
        nullptr, AudioInputDevicePreference(aTrack->mDeviceId),
        Some(aTrack->UpdateRequestedProcessingParams()));
    SwitchAtNextIteration(driver);
  } else if (CurrentDriver()->AsAudioCallbackDriver()) {
    LOG(LogLevel::Debug,
        ("%p: CloseInput: no output present (SystemClockCallback)"this));

    driver = new SystemClockDriver(this, CurrentDriver(), mSampleRate);
    SwitchAtNextIteration(driver);
  }  // else SystemClockDriver->SystemClockDriver, no switch
}

void MediaTrackGraphImpl::UnregisterAllAudioOutputs(MediaTrack* aTrack) {
  MOZ_ASSERT(OnGraphThreadOrNotRunning());
  mOutputDevices.RemoveElementsBy([&](OutputDeviceEntry& aDeviceRef) {
    aDeviceRef.mTrackOutputs.RemoveElement(aTrack);
    // mReceiver is null for the primary output device, which is retained for
    // AudioCallbackDriver output even when no tracks have audio outputs.
    return aDeviceRef.mTrackOutputs.IsEmpty() && aDeviceRef.mReceiver;
  });
}

void MediaTrackGraphImpl::CloseAudioInput(DeviceInputTrack* aTrack) {
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aTrack);

  LOG(LogLevel::Debug, ("%p CloseInput: DeviceInputTrack %p for device %p",
                        this, aTrack, aTrack->mDeviceId));

  class Message : public ControlMessage {
   public:
    Message(MediaTrackGraphImpl* aGraph, DeviceInputTrack* aInputTrack)
        : ControlMessage(nullptr), mGraph(aGraph), mInputTrack(aInputTrack) {}
    void Run() override {
      TRACE("MTG::CloseAudioInputImpl ControlMessage");
      mGraph->CloseAudioInputImpl(mInputTrack);
    }
    MediaTrackGraphImpl* mGraph;
    DeviceInputTrack* mInputTrack;
  };

  // DeviceInputTrack is still alive (in mTracks) even we remove it here, since
  // aTrack->Destroy() is called after this. See DeviceInputTrack::CloseAudio
  // for more details.
  mDeviceInputTrackManagerMainThread.Remove(aTrack);

  this->AppendMessage(MakeUnique<Message>(this, aTrack));

  if (aTrack->AsNativeInputTrack()) {
    LOG(LogLevel::Debug,
        ("%p Native input device %p is closed!"this, aTrack->mDeviceId));
    SetNewNativeInput();
  }
}

// All AudioInput listeners get the same speaker data (at least for now).
void MediaTrackGraphImpl::NotifyOutputData(const AudioChunk& aChunk) {
  if (!mDeviceInputTrackManagerGraphThread.GetNativeInputTrack()) {
    return;
  }

#if defined(MOZ_WEBRTC)
  for (const auto& track : mTracks) {
    if (const auto& t = track->AsAudioProcessingTrack()) {
      t->NotifyOutputData(this, aChunk);
    }
  }
#endif
}

void MediaTrackGraphImpl::NotifyInputStopped() {
  NativeInputTrack* native =
      mDeviceInputTrackManagerGraphThread.GetNativeInputTrack();
  if (!native) {
    return;
  }
  native->NotifyInputStopped(this);
}

void MediaTrackGraphImpl::NotifyInputData(const AudioDataValue* aBuffer,
                                          size_t aFrames, TrackRate aRate,
                                          uint32_t aChannels,
                                          uint32_t aAlreadyBuffered) {
  // Either we have an audio input device, or we just removed the audio input
  // this iteration, and we're switching back to an output-only driver next
  // iteration.
  NativeInputTrack* native =
      mDeviceInputTrackManagerGraphThread.GetNativeInputTrack();
  MOZ_ASSERT(native || Switching());
  if (!native) {
    return;
  }
  native->NotifyInputData(this, aBuffer, aFrames, aRate, aChannels,
                          aAlreadyBuffered);
}

// Main-thread entry point for the result of a request to set cubeb input
// processing params on aDriver. Bounces the result to the graph thread,
// where it is handed to the native input track. aGeneration identifies the
// request so the track can correlate the result.
void MediaTrackGraphImpl::NotifySetRequestedInputProcessingParamsResult(
    AudioCallbackDriver* aDriver, int aGeneration,
    Result<cubeb_input_processing_params, int>&& aResult) {
  MOZ_ASSERT(NS_IsMainThread());
  NativeInputTrack* native =
      mDeviceInputTrackManagerMainThread.GetNativeInputTrack();
  if (!native) {
    // No native input on the main-thread side; nothing to notify.
    return;
  }
  QueueControlMessageWithNoShutdown([this, self = RefPtr(this),
                                     driver = RefPtr(aDriver), aGeneration,
                                     result = std::move(aResult)]() mutable {
    // Re-fetch on the graph thread: the track may have been removed since
    // the message was queued.
    NativeInputTrack* native =
        mDeviceInputTrackManagerGraphThread.GetNativeInputTrack();
    if (!native) {
      return;
    }
    // Drop results from a driver that is no longer the current one.
    if (driver != mDriver) {
      return;
    }
    native->NotifySetRequestedProcessingParamsResult(this, aGeneration, result);
  });
}

void MediaTrackGraphImpl::DeviceChangedImpl() {
  MOZ_ASSERT(OnGraphThread());
  NativeInputTrack* native =
      mDeviceInputTrackManagerGraphThread.GetNativeInputTrack();
  if (!native) {
    return;
  }
  native->DeviceChanged(this);
}

// Graph-thread setter for the cached maximum number of output channels, as
// re-queried on device changes (see DeviceChanged()).
void MediaTrackGraphImpl::SetMaxOutputChannelCount(uint32_t aMaxChannelCount) {
  MOZ_ASSERT(OnGraphThread());
  mMaxOutputChannelCount = aMaxChannelCount;
}

// Entry point for platform audio-device-change notifications. Hops to the
// main thread if needed, resets the cached output latency, re-queries the
// maximum output channel count on a background thread, and notifies the
// graph thread of the change.
void MediaTrackGraphImpl::DeviceChanged() {
  // This is safe to be called from any thread: this message comes from an
  // underlying platform API, and we don't have much guarantees. If it is not
  // called from the main thread (and it probably will rarely be), it will post
  // itself to the main thread, and the actual device change message will be ran
  // and acted upon on the graph thread.
  if (!NS_IsMainThread()) {
    RefPtr<nsIRunnable> runnable = WrapRunnable(
        RefPtr<MediaTrackGraphImpl>(this), &MediaTrackGraphImpl::DeviceChanged);
    mMainThread->Dispatch(runnable.forget());
    return;
  }

  // Relays the device change to the graph thread via DeviceChangedImpl().
  class Message : public ControlMessage {
   public:
    explicit Message(MediaTrackGraph* aGraph)
        : ControlMessage(nullptr),
          mGraphImpl(static_cast<MediaTrackGraphImpl*>(aGraph)) {}
    void Run() override {
      TRACE("MTG::DeviceChangeImpl ControlMessage");
      mGraphImpl->DeviceChangedImpl();
    }
    // We know that this is valid, because the graph can't shutdown if it has
    // messages.
    MediaTrackGraphImpl* mGraphImpl;
  };

  if (mMainThreadTrackCount == 0 && mMainThreadPortCount == 0) {
    // This is a special case where the origin of this event cannot control the
    // lifetime of the graph, because the graph is controlling the lifetime of
    // the AudioCallbackDriver where the event originated.
    // We know the graph is soon going away, so there's no need to notify about
    // this device change.
    return;
  }

  // Reset the latency, it will get fetched again next time it's queried.
  MOZ_ASSERT(NS_IsMainThread());
  mAudioOutputLatency = 0.0;

  // Dispatch to the bg thread to do the (potentially expensive) query of the
  // maximum channel count, and then dispatch back to the main thread, then to
  // the graph, with the new info. The "special case" above is to be handled
  // back on the main thread as well for the same reasons.
  RefPtr<MediaTrackGraphImpl> self = this;
  NS_DispatchBackgroundTask(NS_NewRunnableFunction(
      "MaxChannelCountUpdateOnBgThread", [self{std::move(self)}]() {
        uint32_t maxChannelCount = CubebUtils::MaxNumberOfChannels();
        self->Dispatch(NS_NewRunnableFunction(
            "MaxChannelCountUpdateToMainThread",
            [self{self}, maxChannelCount]() {
              // Applies the freshly queried channel count on the graph thread.
              class MessageToGraph : public ControlMessage {
               public:
                explicit MessageToGraph(MediaTrackGraph* aGraph,
                                        uint32_t aMaxChannelCount)
                    : ControlMessage(nullptr),
                      mGraphImpl(static_cast<MediaTrackGraphImpl*>(aGraph)),
                      mMaxChannelCount(aMaxChannelCount) {}
                void Run() override {
                  TRACE("MTG::SetMaxOutputChannelCount ControlMessage")
                  mGraphImpl->SetMaxOutputChannelCount(mMaxChannelCount);
                }
                MediaTrackGraphImpl* mGraphImpl;
                uint32_t mMaxChannelCount;
              };

              if (self->mMainThreadTrackCount == 0 &&
                  self->mMainThreadPortCount == 0) {
                // See comments above.
                return;
              }

              self->AppendMessage(
                  MakeUnique<MessageToGraph>(self, maxChannelCount));
            }));
      }));

  AppendMessage(MakeUnique<Message>(this));
}

static const char* GetAudioInputTypeString(const AudioInputType& aType) {
  return aType == AudioInputType::Voice ? "Voice" : "Unknown";
}

void MediaTrackGraph::ReevaluateInputDevice(CubebUtils::AudioDeviceID aID) {
  MOZ_ASSERT(OnGraphThread());
  auto* impl = static_cast<MediaTrackGraphImpl*>(this);
  impl->ReevaluateInputDevice(aID);
}

void MediaTrackGraphImpl::ReevaluateInputDevice(CubebUtils::AudioDeviceID aID) {
  MOZ_ASSERT(OnGraphThread());
  LOG(LogLevel::Debug, ("%p: ReevaluateInputDevice: device %p"this, aID));

  DeviceInputTrack* track =
      mDeviceInputTrackManagerGraphThread.GetDeviceInputTrack(aID);
  if (!track) {
    LOG(LogLevel::Debug,
        ("%p: No DeviceInputTrack for this device. Ignore"this));
    return;
  }

  bool needToSwitch = false;

  if (NonNativeInputTrack* nonNative = track->AsNonNativeInputTrack()) {
    if (nonNative->NumberOfChannels() != AudioInputChannelCount(aID)) {
      LOG(LogLevel::Debug,
          ("%p: %u-channel non-native input device %p (track %p) is "
           "re-configured to %d-channel",
           this, nonNative->NumberOfChannels(), aID, track,
           AudioInputChannelCount(aID)));
      needToSwitch = true;
    }
    if (nonNative->DevicePreference() != AudioInputDevicePreference(aID)) {
      LOG(LogLevel::Debug,
          ("%p: %s-type non-native input device %p (track %p) is re-configured "
           "to %s-type",
           this, GetAudioInputTypeString(nonNative->DevicePreference()), aID,
           track, GetAudioInputTypeString(AudioInputDevicePreference(aID))));
      needToSwitch = true;
    }

    if (needToSwitch) {
      nonNative->StopAudio();
      nonNative->StartAudio(MakeRefPtr<AudioInputSource>(
          MakeRefPtr<AudioInputSourceListener>(nonNative),
          nonNative->GenerateSourceId(), aID, AudioInputChannelCount(aID),
          AudioInputDevicePreference(aID) == AudioInputType::Voice,
          nonNative->mPrincipalHandle, nonNative->mSampleRate, GraphRate()));
    }

    return;
  }

  MOZ_ASSERT(track->AsNativeInputTrack());

  if (AudioCallbackDriver* audioCallbackDriver =
          CurrentDriver()->AsAudioCallbackDriver()) {
    if (audioCallbackDriver->InputChannelCount() !=
        AudioInputChannelCount(aID)) {
      LOG(LogLevel::Debug,
          ("%p: ReevaluateInputDevice: %u-channel AudioCallbackDriver %p is "
           "re-configured to %d-channel",
           this, audioCallbackDriver->InputChannelCount(), audioCallbackDriver,
           AudioInputChannelCount(aID)));
      needToSwitch = true;
    }
    if (audioCallbackDriver->InputDevicePreference() !=
        AudioInputDevicePreference(aID)) {
      LOG(LogLevel::Debug,
          ("%p: ReevaluateInputDevice: %s-type AudioCallbackDriver %p is "
           "re-configured to %s-type",
           this,
           GetAudioInputTypeString(
               audioCallbackDriver->InputDevicePreference()),
           audioCallbackDriver,
           GetAudioInputTypeString(AudioInputDevicePreference(aID))));
      needToSwitch = true;
    }
  } else if (Switching() && NextDriver()->AsAudioCallbackDriver()) {
    // We're already in the process of switching to a audio callback driver,
    // which will happen at the next iteration.
    // However, maybe it's not the correct number of channels. Re-query the
    // correct channel amount at this time.
    needToSwitch = true;
  }

  if (needToSwitch) {
    AudioCallbackDriver* newDriver = new AudioCallbackDriver(
        this, CurrentDriver(), mSampleRate, PrimaryOutputChannelCount(),
        AudioInputChannelCount(aID), PrimaryOutputDeviceID(), aID,
        AudioInputDevicePreference(aID),
        Some(track->UpdateRequestedProcessingParams()));
    SwitchAtNextIteration(newDriver);
  }
}

bool MediaTrackGraphImpl::OnGraphThreadOrNotRunning() const {
  // either we're on the right thread (and calling CurrentDriver() is safe),
  // or we're going to fail the assert anyway, so don't cross-check
  // via CurrentDriver().
  return mGraphDriverRunning ? OnGraphThread() : NS_IsMainThread();
}

bool MediaTrackGraphImpl::OnGraphThread() const {
  // we're on the right thread (and calling mDriver is safe),
  MOZ_ASSERT(mDriver);
  if (mGraphRunner && mGraphRunner->OnThread()) {
    return true;
  }
  return mDriver->OnThread();
}

bool MediaTrackGraphImpl::Destroyed() const {
  MOZ_ASSERT(NS_IsMainThread());
  return !mSelfRef;
}

bool MediaTrackGraphImpl::ShouldUpdateMainThread() {
  MOZ_ASSERT(OnGraphThreadOrNotRunning());
  if (mRealtime) {
    return true;
  }

  TimeStamp now = TimeStamp::Now();
  // For offline graphs, update now if it has been long enough since the last
  // update, or if it has reached the end.
  if ((now - mLastMainThreadUpdate).ToMilliseconds() >
          CurrentDriver()->IterationDuration() ||
      mStateComputedTime >= mEndTime) {
    mLastMainThreadUpdate = now;
    return true;
  }
  return false;
}

// Builds the batch of per-track updates (current time, ended flag) destined
// for the main thread and moves pending runnables into the outgoing queue.
// Must be called with mMonitor held. aFinalUpdate forces an update even when
// offline-graph throttling would otherwise skip it.
void MediaTrackGraphImpl::PrepareUpdatesToMainThreadState(bool aFinalUpdate) {
  MOZ_ASSERT(OnGraphThreadOrNotRunning());
  mMonitor.AssertCurrentThreadOwns();

  // We don't want to frequently update the main thread about timing update
  // when we are not running in realtime.
  if (aFinalUpdate || ShouldUpdateMainThread()) {
    // Strip updates that will be obsoleted below, so as to keep the length of
    // mTrackUpdates sane. An update is kept only if its track still exists
    // and no longer needs main-thread updates (i.e. no newer update will be
    // appended for it below).
    size_t keptUpdateCount = 0;
    for (size_t i = 0; i < mTrackUpdates.Length(); ++i) {
      MediaTrack* track = mTrackUpdates[i].mTrack;
      // RemoveTrackGraphThread() clears mTrack in updates for
      // tracks that are removed from the graph.
      MOZ_ASSERT(!track || track->GraphImpl() == this);
      if (!track || track->MainThreadNeedsUpdates()) {
        // Discard this update as it has either been cleared when the track
        // was destroyed or there will be a newer update below.
        continue;
      }
      if (keptUpdateCount != i) {
        // Compact in place; the moved-from slot must be empty afterwards.
        mTrackUpdates[keptUpdateCount] = std::move(mTrackUpdates[i]);
        MOZ_ASSERT(!mTrackUpdates[i].mTrack);
      }
      ++keptUpdateCount;
    }
    mTrackUpdates.TruncateLength(keptUpdateCount);

    // Reserve up front: at most one new update per (suspended or not) track.
    mTrackUpdates.SetCapacity(mTrackUpdates.Length() + mTracks.Length() +
                              mSuspendedTracks.Length());
    for (MediaTrack* track : AllTracks()) {
      if (!track->MainThreadNeedsUpdates()) {
        continue;
      }
      TrackUpdate* update = mTrackUpdates.AppendElement();
      update->mTrack = track;
      // No blocking to worry about here, since we've passed
      // UpdateCurrentTimeForTracks.
      update->mNextMainThreadCurrentTime =
          track->GraphTimeToTrackTime(mProcessedTime);
      update->mNextMainThreadEnded = track->mNotifiedEnded;
    }
    mNextMainThreadGraphTime = mProcessedTime;
    if (!mPendingUpdateRunnables.IsEmpty()) {
      mUpdateRunnables.AppendElements(std::move(mPendingUpdateRunnables));
    }
  }

  // If this is the final update, then a stable state event will soon be
  // posted just before this thread finishes, and so there is no need to also
  // post here.
  if (!aFinalUpdate &&
      // Don't send the message to the main thread if it's not going to have
      // any work to do.
      !(mUpdateRunnables.IsEmpty() && mTrackUpdates.IsEmpty())) {
    EnsureStableStateEventPosted();
  }
}

GraphTime MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(GraphTime aTime) {
  if (aTime % WEBAUDIO_BLOCK_SIZE == 0) {
    return aTime;
  }
  return RoundUpToNextAudioBlock(aTime);
}

GraphTime MediaTrackGraphImpl::RoundUpToNextAudioBlock(GraphTime aTime) {
  uint64_t block = aTime >> WEBAUDIO_BLOCK_SIZE_BITS;
  uint64_t nextBlock = block + 1;
  GraphTime nextTime = nextBlock << WEBAUDIO_BLOCK_SIZE_BITS;
  return nextTime;
}

// Processes the tracks from aTrackIndex onward in WEBAUDIO_BLOCK_SIZE quanta
// until mProcessedTime reaches mStateComputedTime. Used once an
// AudioNodeTrack is encountered, since audio nodes operate block by block.
// aSampleRate is not used in the body; the caller passes the common
// AudioNodeTrack sample rate.
void MediaTrackGraphImpl::ProduceDataForTracksBlockByBlock(
    uint32_t aTrackIndex, TrackRate aSampleRate) {
  MOZ_ASSERT(OnGraphThread());
  MOZ_ASSERT(aTrackIndex <= mFirstCycleBreaker,
             "Cycle breaker is not AudioNodeTrack?");

  while (mProcessedTime < mStateComputedTime) {
    // Microtask checkpoints are in between render quanta.
    nsAutoMicroTask mt;

    GraphTime next = RoundUpToNextAudioBlock(mProcessedTime);
    // Tracks at and beyond mFirstCycleBreaker emit their output before any
    // input is processed, breaking cycles in the track graph.
    for (uint32_t i = mFirstCycleBreaker; i < mTracks.Length(); ++i) {
      auto nt = static_cast<AudioNodeTrack*>(mTracks[i]);
      MOZ_ASSERT(nt->AsAudioNodeTrack());
      nt->ProduceOutputBeforeInput(mProcessedTime);
    }
    // Then run one block of regular input processing for every processed
    // track, allowing tracks to end only on the final block.
    for (uint32_t i = aTrackIndex; i < mTracks.Length(); ++i) {
      ProcessedMediaTrack* pt = mTracks[i]->AsProcessedTrack();
      if (pt) {
        pt->ProcessInput(
            mProcessedTime, next,
            (next == mStateComputedTime) ? ProcessedMediaTrack::ALLOW_END : 0);
      }
    }
    mProcessedTime = next;
  }
  NS_ASSERTION(mProcessedTime == mStateComputedTime,
               "Something went wrong with rounding to block boundaries");
}

void MediaTrackGraphImpl::RunMessageAfterProcessing(
    UniquePtr<ControlMessageInterface> aMessage) {
  MOZ_ASSERT(OnGraphThread());

  if (mFrontMessageQueue.IsEmpty()) {
    mFrontMessageQueue.AppendElement();
  }

  // Only one block is used for messages from the graph thread.
  MOZ_ASSERT(mFrontMessageQueue.Length() == 1);
  mFrontMessageQueue[0].mMessages.AppendElement(std::move(aMessage));
}

// Runs every queued control message, batch by batch, then clears the queue.
void MediaTrackGraphImpl::RunMessagesInQueue() {
  TRACE("MTG::RunMessagesInQueue");
  MOZ_ASSERT(OnGraphThread());
  // Calculate independent action times for each batch of messages (each
  // batch corresponding to an event loop task). This isolates the performance
  // of different scripts to some extent.
  // NOTE(review): index-based iteration may be deliberate — messages queued
  // via RunMessageAfterProcessing() append to this same queue, and indexed
  // loops tolerate growth during iteration where range-for would not.
  // Confirm before restructuring.
  for (uint32_t i = 0; i < mFrontMessageQueue.Length(); ++i) {
    nsTArray<UniquePtr<ControlMessageInterface>>& messages =
        mFrontMessageQueue[i].mMessages;

    for (uint32_t j = 0; j < messages.Length(); ++j) {
      TRACE("ControlMessage::Run");
      messages[j]->Run();
    }
  }
  mFrontMessageQueue.Clear();
}

void MediaTrackGraphImpl::UpdateGraph(GraphTime aEndBlockingDecisions) {
  TRACE("MTG::UpdateGraph");
  MOZ_ASSERT(OnGraphThread());
  MOZ_ASSERT(aEndBlockingDecisions >= mProcessedTime);
  // The next state computed time can be the same as the previous: it
  // means the driver would have been blocking indefinitly, but the graph has
  // been woken up right after having been to sleep.
  MOZ_ASSERT(aEndBlockingDecisions >= mStateComputedTime);

  CheckDriver();
  UpdateTrackOrder();

  // Always do another iteration if there are tracks waiting to resume.
  bool ensureNextIteration = !mPendingResumeOperations.IsEmpty();

  for (MediaTrack* track : mTracks) {
    if (SourceMediaTrack* is = track->AsSourceTrack()) {
      ensureNextIteration |= is->PullNewData(aEndBlockingDecisions);
      is->ExtractPendingInput(mStateComputedTime, aEndBlockingDecisions);
    }
    if (track->mEnded) {
      // The track's not suspended, and since it's ended, underruns won't
      // stop it playing out. So there's no blocking other than what we impose
      // here.
      GraphTime endTime = track->GetEnd() + track->mStartTime;
      if (endTime <= mStateComputedTime) {
        LOG(LogLevel::Verbose,
            ("%p: MediaTrack %p is blocked due to being ended"this, track));
        track->mStartBlocking = mStateComputedTime;
      } else {
        LOG(LogLevel::Verbose,
            ("%p: MediaTrack %p has ended, but is not blocked yet (current "
             "time %f, end at %f)",
             this, track, MediaTimeToSeconds(mStateComputedTime),
             MediaTimeToSeconds(endTime)));
        // Data can't be added to a ended track, so underruns are irrelevant.
        MOZ_ASSERT(endTime <= aEndBlockingDecisions);
        track->mStartBlocking = endTime;
      }
    } else {
      track->mStartBlocking = WillUnderrun(track, aEndBlockingDecisions);

#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
      if (SourceMediaTrack* s = track->AsSourceTrack()) {
        if (s->Ended()) {
          continue;
        }
        {
          MutexAutoLock lock(s->mMutex);
          if (!s->mUpdateTrack->mPullingEnabled) {
            // The invariant that data must be provided is only enforced when
            // pulling.
            continue;
          }
        }
        if (track->GetEnd() <
            track->GraphTimeToTrackTime(aEndBlockingDecisions)) {
          LOG(LogLevel::Error,
              ("%p: SourceMediaTrack %p (%s) is live and pulled, "
               "but wasn't fed "
               "enough data. TrackListeners=%zu. Track-end=%f, "
               "Iteration-end=%f",
               this, track,
               (track->mType == MediaSegment::AUDIO ? "audio" : "video"),
               track->mTrackListeners.Length(),
               MediaTimeToSeconds(track->GetEnd()),
               MediaTimeToSeconds(
                   track->GraphTimeToTrackTime(aEndBlockingDecisions))));
          MOZ_DIAGNOSTIC_CRASH(
              "A non-ended SourceMediaTrack wasn't fed "
              "enough data by NotifyPull");
        }
      }
#endif /* MOZ_DIAGNOSTIC_ASSERT_ENABLED */
    }
  }

  for (MediaTrack* track : mSuspendedTracks) {
    track->mStartBlocking = mStateComputedTime;
  }

  // If the loop is woken up so soon that IterationEnd() barely advances or
  // if an offline graph is not currently rendering, we end up having
  // aEndBlockingDecisions == mStateComputedTime.
  // Since the process interval [mStateComputedTime, aEndBlockingDecision) is
  // empty, Process() will not find any unblocked track and so will not
  // ensure another iteration.  If the graph should be rendering, then ensure
  // another iteration to render.
  if (ensureNextIteration || (aEndBlockingDecisions == mStateComputedTime &&
                              mStateComputedTime < mEndTime)) {
    EnsureNextIteration();
  }
}

// Chooses which output device's mixed audio is fed to echo cancellation
// observers (mOutputDeviceForAEC). Sticks with the current device while it
// has non-null audio; otherwise switches to a device that does, and falls
// back to the primary device if the current one has been removed.
void MediaTrackGraphImpl::SelectOutputDeviceForAEC() {
  MOZ_ASSERT(OnGraphThread());
  size_t currentDeviceIndex = mOutputDevices.IndexOf(mOutputDeviceForAEC);
  if (currentDeviceIndex == mOutputDevices.NoIndex) {
    // Outputs for this device have been removed.
    // Fall back to the primary output device.
    LOG(LogLevel::Info, ("%p: No remaining outputs to device %p. "
                         "Switch to primary output device %p for AEC",
                         this, mOutputDeviceForAEC, PrimaryOutputDeviceID()));
    mOutputDeviceForAEC = PrimaryOutputDeviceID();
    // The primary device is expected at index 0.
    currentDeviceIndex = 0;
    MOZ_ASSERT(mOutputDevices[0].mDeviceID == mOutputDeviceForAEC);
  }
  if (mOutputDevices.Length() == 1) {
    // No other output devices so there is no choice.
    return;
  }

  // The output is considered silent intentionally only if the whole duration
  // (often more than just this processing interval) of audio data in the
  // MediaSegment is null so as to reduce switching between output devices
  // should there be short durations of silence.
  auto HasNonNullAudio = [](const TrackAndVolume& aTV) {
    return aTV.mVolume != 0 && !aTV.mTrack->IsSuspended() &&
           !aTV.mTrack->GetData()->IsNull();
  };
  // Keep using the same output device stream if it has non-null data,
  // so as to stay with a stream having ongoing audio.  If the output stream
  // is switched, the echo cancellation algorithm can take some time to adjust
  // to the change in delay, so there is less value in switching back and
  // forth between output devices for very short sounds.
  for (const auto& output : mOutputDevices[currentDeviceIndex].mTrackOutputs) {
    if (HasNonNullAudio(output)) {
      return;
    }
  }
  // The current output device is silent.  Use another if it has non-null data.
  for (const auto& outputDeviceEntry : mOutputDevices) {
    for (const auto& output : outputDeviceEntry.mTrackOutputs) {
      if (HasNonNullAudio(output)) {
        // Switch to this device.
        LOG(LogLevel::Info,
            ("%p: Switch output device for AEC from silent %p to non-null %p",
             this, mOutputDeviceForAEC, outputDeviceEntry.mDeviceID));
        mOutputDeviceForAEC = outputDeviceEntry.mDeviceID;
        return;
      }
    }
  }
  // Null data for all outputs.  Keep using the same device.
}

// Renders the interval [mProcessedTime, mStateComputedTime): lets each
// processed track produce its data (block by block once an AudioNodeTrack is
// reached), then mixes and delivers audio for each output device.
// aMixerReceiver is non-null when an AudioCallbackDriver is consuming the
// primary output mix.
void MediaTrackGraphImpl::Process(MixerCallbackReceiver* aMixerReceiver) {
  TRACE("MTG::Process");
  MOZ_ASSERT(OnGraphThread());
  if (mStateComputedTime == mProcessedTime) {  // No frames to render.
    return;
  }

  // Play track contents.
  bool allBlockedForever = true;
  // True when we've done ProcessInput for all processed tracks.
  bool doneAllProducing = false;
  const GraphTime oldProcessedTime = mProcessedTime;

  // Figure out what each track wants to do
  for (uint32_t i = 0; i < mTracks.Length(); ++i) {
    MediaTrack* track = mTracks[i];
    if (!doneAllProducing) {
      ProcessedMediaTrack* pt = track->AsProcessedTrack();
      if (pt) {
        AudioNodeTrack* n = track->AsAudioNodeTrack();
        if (n) {
#ifdef DEBUG
          // Verify that the sampling rate for all of the following tracks is
          // the same
          for (uint32_t j = i + 1; j < mTracks.Length(); ++j) {
            AudioNodeTrack* nextTrack = mTracks[j]->AsAudioNodeTrack();
            if (nextTrack) {
              MOZ_ASSERT(n->mSampleRate == nextTrack->mSampleRate,
                         "All AudioNodeTracks in the graph must have the same "
                         "sampling rate");
            }
          }
#endif
          // Since an AudioNodeTrack is present, go ahead and
          // produce audio block by block for all the rest of the tracks.
          ProduceDataForTracksBlockByBlock(i, n->mSampleRate);
          doneAllProducing = true;
        } else {
          pt->ProcessInput(mProcessedTime, mStateComputedTime,
                           ProcessedMediaTrack::ALLOW_END);
          // Assert that a live track produced enough data
          MOZ_ASSERT_IF(!track->mEnded,
                        track->GetEnd() >= GraphTimeToTrackTimeWithBlocking(
                                               track, mStateComputedTime));
        }
      }
    }
    // Any track that isn't blocked for the whole interval keeps the graph
    // iterating.
    if (track->mStartBlocking > oldProcessedTime) {
      allBlockedForever = false;
    }
  }
  mProcessedTime = mStateComputedTime;

  // Mix and deliver audio output for every output device.
  SelectOutputDeviceForAEC();
  for (const auto& outputDeviceEntry : mOutputDevices) {
    uint32_t outputChannelCount;
    if (!outputDeviceEntry.mReceiver) {  // primary output
      if (!aMixerReceiver) {
        // Running off a system clock driver.  No need to mix output.
        continue;
      }
      MOZ_ASSERT(CurrentDriver()->AsAudioCallbackDriver(),
                 "Driver must be AudioCallbackDriver if aMixerReceiver");
      // Use the number of channel the driver expects: this is the number of
      // channel that can be output by the underlying system level audio stream.
      outputChannelCount =
          CurrentDriver()->AsAudioCallbackDriver()->OutputChannelCount();
    } else {
      outputChannelCount = AudioOutputChannelCount(outputDeviceEntry);
    }
    MOZ_ASSERT(mRealtime,
               "If there's an output device, this graph must be realtime");
    mMixer.StartMixing();
    // This is the number of frames that are written to the output buffer, for
    // this iteration.
    TrackTime ticksPlayed = 0;
    for (const auto& t : outputDeviceEntry.mTrackOutputs) {
      TrackTime ticksPlayedForThisTrack =
          PlayAudio(t, oldProcessedTime, outputChannelCount);
      if (ticksPlayed == 0) {
        ticksPlayed = ticksPlayedForThisTrack;
      } else {
        MOZ_ASSERT(
            !ticksPlayedForThisTrack || ticksPlayedForThisTrack == ticksPlayed,
            "Each track should have the same number of frames.");
      }
    }

    if (ticksPlayed == 0) {
      // Nothing was played, so the mixer doesn't know how many frames were
      // processed. We still tell it so AudioCallbackDriver knows how much has
      // been processed. (bug 1406027)
      mMixer.Mix(nullptr, outputChannelCount,
                 mStateComputedTime - oldProcessedTime, mSampleRate);
    }
    AudioChunk* outputChunk = mMixer.MixedChunk();
    if (outputDeviceEntry.mDeviceID == mOutputDeviceForAEC) {
      // Callback any observers for the AEC speaker data.  Note that one
      // (maybe) of these will be full-duplex, the others will get their input
      // data off separate cubeb callbacks.
      NotifyOutputData(*outputChunk);
    }
    if (!outputDeviceEntry.mReceiver) {  // primary output
      aMixerReceiver->MixerCallback(outputChunk, mSampleRate);
    } else {
      outputDeviceEntry.mReceiver->EnqueueAudio(*outputChunk);
    }
  }

  if (!allBlockedForever) {
    EnsureNextIteration();
  }
}

// Publishes track/timing updates to the main thread. Returns true when the
// graph should keep iterating, false when this was the final update and the
// graph should enter shutdown mode after this iteration.
bool MediaTrackGraphImpl::UpdateMainThreadState() {
  MOZ_ASSERT(OnGraphThread());
  if (mForceShutDownReceived) {
    // Forced shutdown: give every track a chance to release graph-thread
    // state first.
    for (MediaTrack* track : AllTracks()) {
      track->OnGraphThreadDone();
    }
  }
  {
    MonitorAutoLock lock(mMonitor);
    // Final when shutdown was forced, or the graph is empty with no queued
    // messages left.
    bool finalUpdate =
        mForceShutDownReceived || (IsEmpty() && mBackMessageQueue.IsEmpty());
    PrepareUpdatesToMainThreadState(finalUpdate);
    if (!finalUpdate) {
      SwapMessageQueues();
      return true;
    }
    // The JSContext will not be used again.
    // Clear main thread access while under monitor.
    mJSContext = nullptr;
  }
  dom::WorkletThread::DeleteCycleCollectedJSContext();
  // Enter shutdown mode when this iteration is completed.
  // No need to Destroy tracks here. The main-thread owner of each
  // track is responsible for calling Destroy on them.
  return false;
}

auto MediaTrackGraphImpl::OneIteration(GraphTime aStateTime,
                                       GraphTime aIterationEnd,
                                       MixerCallbackReceiver* aMixerReceiver)
    -> IterationResult {
  if (mGraphRunner) {
    return mGraphRunner->OneIteration(aStateTime, aIterationEnd,
                                      aMixerReceiver);
  }

  return OneIterationImpl(aStateTime, aIterationEnd, aMixerReceiver);
}

auto MediaTrackGraphImpl::OneIterationImpl(
    GraphTime aStateTime, GraphTime aIterationEnd,
    MixerCallbackReceiver* aMixerReceiver) -> IterationResult {
  TRACE("MTG::OneIterationImpl");

  if (SoftRealTimeLimitReached()) {
    TRACE("MTG::Demoting real-time thread!");
    DemoteThreadFromRealTime();
  }

  // Changes to LIFECYCLE_RUNNING occur before starting or reviving the graph
  // thread, and so the monitor need not be held to check mLifecycleState.
  // LIFECYCLE_THREAD_NOT_STARTED is possible when shutting down offline
  // graphs that have not started.

  // While changes occur on mainthread, this assert confirms that
  // this code shouldn't run if mainthread might be changing the state (to
  // > LIFECYCLE_RUNNING)

  // Ignore mutex warning: static during execution of the graph
  MOZ_PUSH_IGNORE_THREAD_SAFETY
  MOZ_DIAGNOSTIC_ASSERT(mLifecycleState <= LIFECYCLE_RUNNING);
  MOZ_POP_THREAD_SAFETY

  MOZ_ASSERT(OnGraphThread());

  WebCore::DenormalDisabler disabler;

  // Process graph message from the main thread for this iteration.
  RunMessagesInQueue();

  // Process MessagePort events.
  // These require a single thread, which has an nsThread with an event queue.
  if (mGraphRunner || !mRealtime) {
    TRACE("MTG::MessagePort events");
    NS_ProcessPendingEvents(nullptr);
  }

  GraphTime stateTime = std::min(aStateTime, GraphTime(mEndTime));
  UpdateGraph(stateTime);

  mStateComputedTime = stateTime;

  GraphTime oldProcessedTime = mProcessedTime;
  Process(aMixerReceiver);
  MOZ_ASSERT(mProcessedTime == stateTime);

  UpdateCurrentTimeForTracks(oldProcessedTime);

  ProcessChunkMetadata(oldProcessedTime);

  // Process graph messages queued from RunMessageAfterProcessing() on this
  // thread during the iteration.
  RunMessagesInQueue();

  if (!UpdateMainThreadState()) {
    if (Switching()) {
      // We'll never get to do this switch. Clear mNextDriver to break the
      // ref-cycle graph->nextDriver->currentDriver->graph.
      SwitchAtNextIteration(nullptr);
    }
    return IterationResult::CreateStop(
        NewRunnableMethod("MediaTrackGraphImpl::SignalMainThreadCleanup"this,
                          &MediaTrackGraphImpl::SignalMainThreadCleanup));
  }

  if (Switching()) {
    RefPtr<GraphDriver> nextDriver = std::move(mNextDriver);
    return IterationResult::CreateSwitchDriver(
        nextDriver, NewRunnableMethod<StoreRefPtrPassByPtr<GraphDriver>>(
                        "MediaTrackGraphImpl::SetCurrentDriver"this,
                        &MediaTrackGraphImpl::SetCurrentDriver, nextDriver));
  }

  return IterationResult::CreateStillProcessing();
}

void MediaTrackGraphImpl::ApplyTrackUpdate(TrackUpdate* aUpdate) {
  MOZ_ASSERT(NS_IsMainThread());
  mMonitor.AssertCurrentThreadOwns();

  MediaTrack* track = aUpdate->mTrack;
  if (!track) return;
  track->mMainThreadCurrentTime = aUpdate->mNextMainThreadCurrentTime;
  track->mMainThreadEnded = aUpdate->mNextMainThreadEnded;

--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=85 H=87 G=85

¤ Dauer der Verarbeitung: 0.19 Sekunden  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.






                                                                                                                                                                                                                                                                                                                                                                                                     


Neuigkeiten

     Aktuelles
     Motto des Tages

Software

     Produkte
     Quellcodebibliothek

Aktivitäten

     Artikel über Sicherheit
     Anleitung zur Aktivierung von SSL

Muße

     Gedichte
     Musik
     Bilder

Jenseits des Üblichen ....

Besucherstatistik

Besucherstatistik

Monitoring

Montastic status badge