/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaTrackGraphImpl.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Unused.h"
#include "AudioSegment.h"
#include "CrossGraphPort.h"
#include "VideoSegment.h"
#include "nsContentUtils.h"
#include "nsPrintfCString.h"
#include "nsServiceManagerUtils.h"
#include "prerror.h"
#include "mozilla/Logging.h"
#include "mozilla/Attributes.h"
#include "ForwardedInputTrack.h"
#include "ImageContainer.h"
#include "AudioCaptureTrack.h"
#include "AudioNodeTrack.h"
#include "AudioNodeExternalInputTrack.h"
#include "MediaTrackListener.h"
#include "mozilla/dom/BaseAudioContextBinding.h"
#include "mozilla/dom/WorkletThread.h"
#include "mozilla/media/MediaUtils.h"
#include <algorithm>
#include "GeckoProfiler.h"
#include "VideoFrameContainer.h"
#include "mozilla/AbstractThread.h"
#include "mozilla/StaticPrefs_dom.h"
#include "mozilla/Unused.h"
#include "transport/runnable_utils.h"
#include "VideoUtils.h"
#include "GraphRunner.h"
#include "Tracing.h"
#include "UnderrunHandler.h"
#include "mozilla/CycleCollectedJSRuntime.h"
#include "webaudio/blink/DenormalDisabler.h"
#include "webaudio/blink/HRTFDatabaseLoader.h"
using std::move;
using namespace mozilla::layers;
using namespace mozilla::dom;
using namespace mozilla::gfx;
using namespace mozilla::media;
namespace mozilla {
LazyLogModule gMediaTrackGraphLog("MediaTrackGraph");
#ifdef LOG
# undef LOG
#endif // LOG
#define LOG(type, msg) MOZ_LOG(gMediaTrackGraphLog, type, msg)
/**
* A hash table containing the graph instances, one per document.
*
* The key is a hash of nsPIDOMWindowInner, see `WindowToHash`.
*/
static nsTHashMap<nsUint32HashKey, MediaTrackGraphImpl*> gGraphs;
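// AudioInputSamples buffers interleaved audio input samples between the
// NotifyInputData calls that produce them and the NativeInputTrack
// ProcessInput call that consumes them.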
const AudioDataValue* AudioInputSamples::Data() const {
return mData.Elements();
}
size_t AudioInputSamples::FrameCount() const {
MOZ_ASSERT(mChannels > 0);
return mData.Length() / mChannels;
}
TrackRate AudioInputSamples::Rate() const { return mRate; }
uint32_t AudioInputSamples::Channels() const { return mChannels; }
bool AudioInputSamples::IsEmpty() const { return mData.IsEmpty(); }
void AudioInputSamples::Push(const AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels) {
MOZ_ASSERT(aRate > 0);
MOZ_ASSERT(aChannels > 0);
if (mRate == 0) {
mRate = aRate;
}
if (mChannels == 0) {
mChannels = aChannels;
}
MOZ_ASSERT(aRate == mRate);
MOZ_ASSERT(aChannels == mChannels);
CheckedInt<size_t> samples(aFrames);
samples *= static_cast<size_t>(aChannels);
MOZ_ASSERT(samples.isValid());
size_t oldLen = mData.Length();
size_t newLen = oldLen + samples.value();
if (newLen > mData.Capacity()) {
mData.SetCapacity(newLen);
}
mData.SetLengthAndRetainStorage(newLen);
AudioDataValue* dest = mData.Elements() + oldLen;
PodCopy(dest, aBuffer, samples.value());
}
void AudioInputSamples::Clear() {
mRate = 0;
mChannels = 0;
mData.ClearAndRetainStorage();
}
NativeInputTrack* NativeInputTrack::Create(MediaTrackGraphImpl* aGraph) {
MOZ_ASSERT(NS_IsMainThread());
NativeInputTrack* track = new NativeInputTrack(aGraph->GraphRate());
aGraph->AddTrack(track);
return track;
}
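// AddUser/RemoveUser count the main-thread users of this NativeInputTrack so
// that CloseAudioInput can destroy the track once the last user has been
// removed.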
size_t NativeInputTrack::AddUser() {
MOZ_ASSERT(NS_IsMainThread());
mUserCount += 1;
return mUserCount;
}
size_t NativeInputTrack::RemoveUser() {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mUserCount > 0);
mUserCount -= 1;
return mUserCount;
}
void NativeInputTrack::DestroyImpl() {
MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning());
mInputData.Clear();
ProcessedMediaTrack::DestroyImpl();
}
void NativeInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
uint32_t aFlags) {
MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning());
TRACE_COMMENT("NativeInputTrack::ProcessInput", "%p", this);
if (mInputData.IsEmpty()) {
return;
}
// The numbers of NotifyInputData and ProcessInput calls can differ. We
// always process the input data received via NotifyInputData in the first
// ProcessInput call that follows it.
// mSegment will be the de-interleaved audio data converted from
// mInputData.
GetData<AudioSegment>()->Clear();
GetData<AudioSegment>()->AppendFromInterleavedBuffer(
mInputData.Data(), mInputData.FrameCount(), mInputData.Channels(),
PRINCIPAL_HANDLE_NONE);
mInputData.Clear();
}
uint32_t NativeInputTrack::NumberOfChannels() const {
MOZ_ASSERT(mGraph->OnGraphThreadOrNotRunning());
return mInputChannels;
}
void NativeInputTrack::NotifyOutputData(MediaTrackGraphImpl* aGraph,
AudioDataValue* aBuffer, size_t aFrames,
TrackRate aRate, uint32_t aChannels) {
MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning());
MOZ_ASSERT(aGraph == mGraph, "Receive output data from another graph");
for (auto& listener : mDataUsers) {
listener->NotifyOutputData(aGraph, aBuffer, aFrames, aRate, aChannels);
}
}
void NativeInputTrack::NotifyInputStopped(MediaTrackGraphImpl* aGraph) {
MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning());
MOZ_ASSERT(aGraph == mGraph,
"Receive input stopped signal from another graph");
mInputChannels = 0;
mInputData.Clear();
for (auto& listener : mDataUsers) {
listener->NotifyInputStopped(aGraph);
}
}
void NativeInputTrack::NotifyInputData(MediaTrackGraphImpl* aGraph,
const AudioDataValue* aBuffer,
size_t aFrames, TrackRate aRate,
uint32_t aChannels,
uint32_t aAlreadyBuffered) {
MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning());
MOZ_ASSERT(aGraph == mGraph, "Receive input data from another graph");
MOZ_ASSERT(aChannels);
if (!mInputChannels) {
mInputChannels = aChannels;
}
mInputData.Push(aBuffer, aFrames, aRate, aChannels);
for (auto& listener : mDataUsers) {
listener->NotifyInputData(aGraph, aBuffer, aFrames, aRate, aChannels,
aAlreadyBuffered);
}
}
void NativeInputTrack::DeviceChanged(MediaTrackGraphImpl* aGraph) {
MOZ_ASSERT(aGraph->OnGraphThreadOrNotRunning());
MOZ_ASSERT(aGraph == mGraph,
"Receive device changed signal from another graph");
mInputData.Clear();
for (auto& listener : mDataUsers) {
listener->DeviceChanged(aGraph);
}
}
MediaTrackGraphImpl::~MediaTrackGraphImpl() {
MOZ_ASSERT(mTracks.IsEmpty() && mSuspendedTracks.IsEmpty(),
"All tracks should have been destroyed by messages from the main "
"thread");
LOG(LogLevel::Debug, ("MediaTrackGraph %p destroyed", this));
LOG(LogLevel::Debug, ("MediaTrackGraphImpl::~MediaTrackGraphImpl"));
}
void MediaTrackGraphImpl::AddTrackGraphThread(MediaTrack* aTrack) {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
aTrack->mStartTime = mProcessedTime;
if (aTrack->IsSuspended()) {
mSuspendedTracks.AppendElement(aTrack);
LOG(LogLevel::Debug,
("%p: Adding media track %p, in the suspended track array", this,
aTrack));
} else {
mTracks.AppendElement(aTrack);
LOG(LogLevel::Debug, ("%p: Adding media track %p, count %zu", this, aTrack,
mTracks.Length()));
}
SetTrackOrderDirty();
}
void MediaTrackGraphImpl::RemoveTrackGraphThread(MediaTrack* aTrack) {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
// Remove references in mTrackUpdates before we allow aTrack to die.
// Pending updates are not needed (since the main thread has already given
// up the track) so we will just drop them.
{
MonitorAutoLock lock(mMonitor);
for (uint32_t i = 0; i < mTrackUpdates.Length(); ++i) {
if (mTrackUpdates[i].mTrack == aTrack) {
mTrackUpdates[i].mTrack = nullptr;
}
}
}
// Ensure that mFirstCycleBreaker is updated when necessary.
SetTrackOrderDirty();
UnregisterAllAudioOutputs(aTrack);
if (aTrack->IsSuspended()) {
mSuspendedTracks.RemoveElement(aTrack);
} else {
mTracks.RemoveElement(aTrack);
}
LOG(LogLevel::Debug, ("%p: Removed media track %p, count %zu", this, aTrack,
mTracks.Length()));
NS_RELEASE(aTrack); // probably destroying it
}
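// Convert a graph time to track time, clamping at the point where the track
// starts blocking so that blocked time does not count towards track time.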
TrackTime MediaTrackGraphImpl::GraphTimeToTrackTimeWithBlocking(
const MediaTrack* aTrack, GraphTime aTime) const {
MOZ_ASSERT(
aTime <= mStateComputedTime,
"Don't ask about times where we haven't made blocking decisions yet");
return std::max<TrackTime>(
0, std::min(aTime, aTrack->mStartBlocking) - aTrack->mStartTime);
}
GraphTime MediaTrackGraphImpl::IterationEnd() const {
MOZ_ASSERT(OnGraphThread());
return mIterationEndTime;
}
void MediaTrackGraphImpl::UpdateCurrentTimeForTracks(
GraphTime aPrevCurrentTime) {
MOZ_ASSERT(OnGraphThread());
for (MediaTrack* track : AllTracks()) {
// Shouldn't have already notified of ended *and* have output!
MOZ_ASSERT_IF(track->mStartBlocking > aPrevCurrentTime,
!track->mNotifiedEnded);
// Calculate blocked time and fire Blocked/Unblocked events
GraphTime blockedTime = mStateComputedTime - track->mStartBlocking;
NS_ASSERTION(blockedTime >= 0, "Error in blocking time");
track->AdvanceTimeVaryingValuesToCurrentTime(mStateComputedTime,
blockedTime);
LOG(LogLevel::Verbose,
("%p: MediaTrack %p bufferStartTime=%f blockedTime=%f", this, track,
MediaTimeToSeconds(track->mStartTime),
MediaTimeToSeconds(blockedTime)));
track->mStartBlocking = mStateComputedTime;
TrackTime trackCurrentTime =
track->GraphTimeToTrackTime(mStateComputedTime);
if (track->mEnded) {
MOZ_ASSERT(track->GetEnd() <= trackCurrentTime);
if (!track->mNotifiedEnded) {
// Playout of this track ended and listeners have not been notified.
track->mNotifiedEnded = true;
SetTrackOrderDirty();
for (const auto& listener : track->mTrackListeners) {
listener->NotifyOutput(this, track->GetEnd());
listener->NotifyEnded(this);
}
}
} else {
for (const auto& listener : track->mTrackListeners) {
listener->NotifyOutput(this, trackCurrentTime);
}
}
}
}
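// Walk the chunks of aSegment that fall within [aStart, aEnd) and notify
// aTrack's listeners whenever the principal handle carried by the data
// changes.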
template <typename C, typename Chunk>
void MediaTrackGraphImpl::ProcessChunkMetadataForInterval(MediaTrack* aTrack,
C& aSegment,
TrackTime aStart,
TrackTime aEnd) {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
MOZ_ASSERT(aTrack);
TrackTime offset = 0;
for (typename C::ConstChunkIterator chunk(aSegment); !chunk.IsEnded();
chunk.Next()) {
if (offset >= aEnd) {
break;
}
offset += chunk->GetDuration();
if (chunk->IsNull() || offset < aStart) {
continue;
}
const PrincipalHandle& principalHandle = chunk->GetPrincipalHandle();
if (principalHandle != aSegment.GetLastPrincipalHandle()) {
aSegment.SetLastPrincipalHandle(principalHandle);
LOG(LogLevel::Debug,
("%p: MediaTrack %p, principalHandle "
"changed in %sChunk with duration %lld",
this, aTrack,
aSegment.GetType() == MediaSegment::AUDIO ? "Audio" : "Video",
(long long)chunk->GetDuration()));
for (const auto& listener : aTrack->mTrackListeners) {
listener->NotifyPrincipalHandleChanged(this, principalHandle);
}
}
}
}
void MediaTrackGraphImpl::ProcessChunkMetadata(GraphTime aPrevCurrentTime) {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
for (MediaTrack* track : AllTracks()) {
TrackTime iterationStart = track->GraphTimeToTrackTime(aPrevCurrentTime);
TrackTime iterationEnd = track->GraphTimeToTrackTime(mProcessedTime);
if (!track->mSegment) {
continue;
}
if (track->mType == MediaSegment::AUDIO) {
ProcessChunkMetadataForInterval<AudioSegment, AudioChunk>(
track, *track->GetData<AudioSegment>(), iterationStart, iterationEnd);
} else if (track->mType == MediaSegment::VIDEO) {
ProcessChunkMetadataForInterval<VideoSegment, VideoChunk>(
track, *track->GetData<VideoSegment>(), iterationStart, iterationEnd);
} else {
MOZ_CRASH("Unknown track type");
}
}
}
GraphTime MediaTrackGraphImpl::WillUnderrun(MediaTrack* aTrack,
GraphTime aEndBlockingDecisions) {
// Ended tracks can't underrun. ProcessedMediaTracks also can't cause
// underrun currently, since we'll always be able to produce data for them
// unless they block on some other track.
if (aTrack->mEnded || aTrack->AsProcessedTrack()) {
return aEndBlockingDecisions;
}
// This track isn't ended or suspended. We don't need to call
// TrackTimeToGraphTime since an underrun is the only thing that can block
// it.
GraphTime bufferEnd = aTrack->GetEnd() + aTrack->mStartTime;
#ifdef DEBUG
if (bufferEnd < mProcessedTime) {
LOG(LogLevel::Error, ("%p: MediaTrack %p underrun, "
"bufferEnd %f < mProcessedTime %f (%" PRId64
" < %" PRId64 "), TrackTime %" PRId64,
this, aTrack, MediaTimeToSeconds(bufferEnd),
MediaTimeToSeconds(mProcessedTime), bufferEnd,
mProcessedTime, aTrack->GetEnd()));
NS_ASSERTION(bufferEnd >= mProcessedTime, "Buffer underran");
}
#endif
return std::min(bufferEnd, aEndBlockingDecisions);
}
namespace {
// Value of mCycleMarker for unvisited tracks in cycle detection.
const uint32_t NOT_VISITED = UINT32_MAX;
// Value of mCycleMarker for ordered tracks in muted cycles.
const uint32_t IN_MUTED_CYCLE = 1;
} // namespace
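// Returns true if the graph has any AudioNodeTrack, or any audio track whose
// playout has not yet been notified as ended. CheckDriver uses this to decide
// whether an AudioCallbackDriver is needed.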
bool MediaTrackGraphImpl::AudioTrackPresent() {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
bool audioTrackPresent = false;
for (MediaTrack* track : mTracks) {
if (track->AsAudioNodeTrack()) {
audioTrackPresent = true;
break;
}
if (track->mType == MediaSegment::AUDIO && !track->mNotifiedEnded) {
audioTrackPresent = true;
break;
}
}
// We may not have an audio input device when we only have AudioNodeTracks.
// But if audioTrackPresent is false, we must have no input device.
MOZ_DIAGNOSTIC_ASSERT_IF(!audioTrackPresent, mDeviceTrackMap.Count() == 0);
return audioTrackPresent;
}
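// Called from UpdateGraph for realtime graphs: apply any pending resume
// operations when an AudioCallbackDriver is current, and schedule a driver
// switch when the need for audio callbacks or the output channel count has
// changed.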
void MediaTrackGraphImpl::CheckDriver() {
MOZ_ASSERT(OnGraphThread());
// An offline graph has only one driver.
// Otherwise, if a switch is already pending, let that happen.
if (!mRealtime || Switching()) {
return;
}
AudioCallbackDriver* audioCallbackDriver =
CurrentDriver()->AsAudioCallbackDriver();
if (audioCallbackDriver) {
for (PendingResumeOperation& op : mPendingResumeOperations) {
op.Apply(this);
}
mPendingResumeOperations.Clear();
}
// Note that this looks for any audio tracks, input or output, and switches
// to a SystemClockDriver if there are none active or no resume operations
// to make any active.
bool needAudioCallbackDriver =
!mPendingResumeOperations.IsEmpty() || AudioTrackPresent();
if (!needAudioCallbackDriver) {
if (audioCallbackDriver && audioCallbackDriver->IsStarted()) {
SwitchAtNextIteration(
new SystemClockDriver(this, CurrentDriver(), mSampleRate));
}
return;
}
uint32_t graphOutputChannelCount = AudioOutputChannelCount();
if (!audioCallbackDriver) {
if (graphOutputChannelCount > 0) {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, graphOutputChannelCount,
AudioInputChannelCount(), mOutputDeviceID, mInputDeviceID,
AudioInputDevicePreference());
SwitchAtNextIteration(driver);
}
return;
}
// Check if this graph should switch to a different number of output channels.
// Generally, a driver switch is explicitly made by an event (e.g., setting
// the AudioDestinationNode channelCount), but if an HTMLMediaElement is
// directly playing back via another HTMLMediaElement, the number of channels
// of the media determines how many channels to output, and it can change
// dynamically.
if (graphOutputChannelCount != audioCallbackDriver->OutputChannelCount()) {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, graphOutputChannelCount,
AudioInputChannelCount(), mOutputDeviceID, mInputDeviceID,
AudioInputDevicePreference());
SwitchAtNextIteration(driver);
}
}
void MediaTrackGraphImpl::UpdateTrackOrder() {
if (!mTrackOrderDirty) {
return;
}
mTrackOrderDirty = false;
// The algorithm for finding cycles is based on Tim Leslie's iterative
// implementation [1][2] of Pearce's variant [3] of Tarjan's strongly
// connected components (SCC) algorithm. There are variations (a) to
// distinguish whether tracks in SCCs of size 1 are in a cycle and (b) to
// re-run the algorithm over SCCs with breaks at DelayNodes.
//
// [2]
//
// There are two stacks. One for the depth-first search (DFS),
mozilla::LinkedList<MediaTrack> dfsStack;
// and another for tracks popped from the DFS stack, but still being
// considered as part of SCCs involving tracks on the stack.
mozilla::LinkedList<MediaTrack> sccStack;
// An index into mTracks for the next track found with no unsatisfied
// upstream dependencies.
uint32_t orderedTrackCount = 0;
for (uint32_t i = 0; i < mTracks.Length(); ++i) {
MediaTrack* t = mTracks[i];
ProcessedMediaTrack* pt = t->AsProcessedTrack();
if (pt) {
// The dfsStack initially contains a list of all processed tracks in
// unchanged order.
dfsStack.insertBack(t);
pt->mCycleMarker = NOT_VISITED;
} else {
// SourceMediaTracks have no inputs and so can be ordered now.
mTracks[orderedTrackCount] = t;
++orderedTrackCount;
}
}
// mNextStackMarker corresponds to "index" in Tarjan's algorithm. It is a
// counter to label mCycleMarker on the next visited track in the DFS
// uniquely in the set of visited tracks that are still being considered.
//
// In this implementation, the counter descends so that the values are
// strictly greater than the values that mCycleMarker takes when the track
// has been ordered (0 or IN_MUTED_CYCLE).
//
// Each new track labelled, as the DFS searches upstream, receives a value
// less than those used for all other tracks being considered.
uint32_t nextStackMarker = NOT_VISITED - 1;
// Reset list of DelayNodes in cycles stored at the tail of mTracks.
mFirstCycleBreaker = mTracks.Length();
// Rearrange dfsStack order as required to DFS upstream and pop tracks
// in processing order to place in mTracks.
while (auto pt = static_cast<ProcessedMediaTrack*>(dfsStack.getFirst())) {
const auto& inputs = pt->mInputs;
MOZ_ASSERT(pt->AsProcessedTrack());
if (pt->mCycleMarker == NOT_VISITED) {
// Record the position on the visited stack, so that any searches
// finding this track again know how much of the stack is in the cycle.
pt->mCycleMarker = nextStackMarker;
--nextStackMarker;
// Not-visited input tracks should be processed first.
// SourceMediaTracks have already been ordered.
for (uint32_t i = inputs.Length(); i--;) {
if (inputs[i]->GetSource()->IsSuspended()) {
continue;
}
auto input = inputs[i]->GetSource()->AsProcessedTrack();
if (input && input->mCycleMarker == NOT_VISITED) {
// It can be that this track has an input which is from a suspended
// AudioContext.
if (input->isInList()) {
input->remove();
dfsStack.insertFront(input);
}
}
}
continue;
}
// Returning from DFS. Pop from dfsStack.
pt->remove();
// cycleStackMarker keeps track of the highest marker value on any
// upstream track, if any, found receiving input, directly or indirectly,
// from the visited stack (and so from |pt|, making a cycle). In a
// variation from Tarjan's SCC algorithm, this does not include |pt|
// unless it is part of the cycle.
uint32_t cycleStackMarker = 0;
for (uint32_t i = inputs.Length(); i--;) {
if (inputs[i]->GetSource()->IsSuspended()) {
continue;
}
auto input = inputs[i]->GetSource()->AsProcessedTrack();
if (input) {
cycleStackMarker = std::max(cycleStackMarker, input->mCycleMarker);
}
}
if (cycleStackMarker <= IN_MUTED_CYCLE) {
// All inputs have been ordered and their stack markers have been removed.
// This track is not part of a cycle. It can be processed next.
pt->mCycleMarker = 0;
mTracks[orderedTrackCount] = pt;
++orderedTrackCount;
continue;
}
// A cycle has been found. Record this track for ordering when all
// tracks in this SCC have been popped from the DFS stack.
sccStack.insertFront(pt);
if (cycleStackMarker > pt->mCycleMarker) {
// Cycles have been found that involve tracks that remain on the stack.
// Leave mCycleMarker indicating the most downstream (last) track on
// the stack known to be part of this SCC. In this way, any searches on
// other paths that find |ps| will know (without having to traverse from
// this track again) that they are part of this SCC (i.e. part of an
// intersecting cycle).
pt->mCycleMarker = cycleStackMarker;
continue;
}
// |pt| is the root of an SCC involving no other tracks on dfsStack, the
// complete SCC has been recorded, and tracks in this SCC are part of at
// least one cycle.
MOZ_ASSERT(cycleStackMarker == pt->mCycleMarker);
// If there are DelayNodes in this SCC, then they may break the cycles.
bool haveDelayNode = false;
auto next = sccStack.getFirst();
// Tracks in this SCC are identified by mCycleMarker <= cycleStackMarker.
// (There may be other tracks later in sccStack from other incompletely
// searched SCCs, involving tracks still on dfsStack.)
//
// DelayNodes in cycles must behave differently from those not in cycles,
// so all DelayNodes in the SCC must be identified.
while (next && static_cast<ProcessedMediaTrack*>(next)->mCycleMarker <=
cycleStackMarker) {
auto nt = next->AsAudioNodeTrack();
// Get next before perhaps removing from list below.
next = next->getNext();
if (nt && nt->Engine()->AsDelayNodeEngine()) {
haveDelayNode = true;
// DelayNodes break cycles by producing their output in a
// preprocessing phase; they do not need to be ordered before their
// consumers. Order them at the tail of mTracks so that they can be
// handled specially. Do so now, so that DFS ignores them.
nt->remove();
nt->mCycleMarker = 0;
--mFirstCycleBreaker;
mTracks[mFirstCycleBreaker] = nt;
}
}
auto after_scc = next;
while ((next = sccStack.getFirst()) != after_scc) {
next->remove();
auto removed = static_cast<ProcessedMediaTrack*>(next);
if (haveDelayNode) {
// Return tracks to the DFS stack again (to order and detect cycles
// without delayNodes). Any of these tracks that are still inputs
// for tracks on the visited stack must be returned to the front of
// the stack to be ordered before their dependents. We know that none
// of these tracks need input from tracks on the visited stack, so
// they can all be searched and ordered before the current stack head
// is popped.
removed->mCycleMarker = NOT_VISITED;
dfsStack.insertFront(removed);
} else {
// Tracks in cycles without any DelayNodes must be muted, and so do
// not need input and can be ordered now. They must be ordered before
// their consumers so that their muted output is available.
removed->mCycleMarker = IN_MUTED_CYCLE;
mTracks[orderedTrackCount] = removed;
++orderedTrackCount;
}
}
}
MOZ_ASSERT(orderedTrackCount == mFirstCycleBreaker);
}
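// Mix the audio of aTkv.mTrack into aMixer for the interval from aPlayedTime
// up to mStateComputedTime, writing silence for blocked periods or missing
// data and applying the track and global volumes. Returns the number of
// ticks written.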
TrackTime MediaTrackGraphImpl::PlayAudio(AudioMixer* aMixer,
const TrackKeyAndVolume& aTkv,
GraphTime aPlayedTime) {
MOZ_ASSERT(OnGraphThread());
MOZ_ASSERT(mRealtime, "Should only attempt to play audio in realtime mode");
MOZ_ASSERT(aMixer, "Can only play audio if there's a mixer");
TrackTime ticksWritten = 0;
MediaTrack* track = aTkv.mTrack;
AudioSegment* audio = track->GetData<AudioSegment>();
AudioSegment output;
TrackTime offset = track->GraphTimeToTrackTime(aPlayedTime);
// We don't update track->mStartTime here to account for time spent
// blocked. Instead, we'll update it in UpdateCurrentTimeForTracks after
// the blocked period has completed. But we do need to make sure we play
// from the right offsets in the track buffer, even if we've already
// written silence for some amount of blocked time after the current time.
GraphTime t = aPlayedTime;
while (t < mStateComputedTime) {
bool blocked = t >= track->mStartBlocking;
GraphTime end = blocked ? mStateComputedTime : track->mStartBlocking;
NS_ASSERTION(end <= mStateComputedTime, "mStartBlocking is wrong!");
// Check how many ticks of sound we can provide if we are blocked some
// time in the middle of this cycle.
TrackTime toWrite = end - t;
if (blocked) {
output.InsertNullDataAtStart(toWrite);
ticksWritten += toWrite;
LOG(LogLevel::Verbose,
("%p: MediaTrack %p writing %" PRId64 " blocking-silence samples for "
"%f to %f (%" PRId64 " to %" PRId64 ")",
this, track, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
offset, offset + toWrite));
} else {
TrackTime endTicksNeeded = offset + toWrite;
TrackTime endTicksAvailable = audio->GetDuration();
if (endTicksNeeded <= endTicksAvailable) {
LOG(LogLevel::Verbose,
("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f "
"(samples %" PRId64 " to %" PRId64 ")",
this, track, toWrite, MediaTimeToSeconds(t),
MediaTimeToSeconds(end), offset, endTicksNeeded));
output.AppendSlice(*audio, offset, endTicksNeeded);
ticksWritten += toWrite;
offset = endTicksNeeded;
} else {
// MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not
// ended.");
// If we are at the end of the track, write any remaining samples and
// pad the rest with silence.
if (endTicksNeeded > endTicksAvailable && offset < endTicksAvailable) {
output.AppendSlice(*audio, offset, endTicksAvailable);
LOG(LogLevel::Verbose,
("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f "
"(samples %" PRId64 " to %" PRId64 ")",
this, track, toWrite, MediaTimeToSeconds(t),
MediaTimeToSeconds(end), offset, endTicksNeeded));
uint32_t available = endTicksAvailable - offset;
ticksWritten += available;
toWrite -= available;
offset = endTicksAvailable;
}
output.AppendNullData(toWrite);
LOG(LogLevel::Verbose,
("%p MediaTrack %p writing %" PRId64 " padding slsamples for %f to "
"%f (samples %" PRId64 " to %" PRId64 ")",
this, track, toWrite, MediaTimeToSeconds(t),
MediaTimeToSeconds(end), offset, endTicksNeeded));
ticksWritten += toWrite;
}
output.ApplyVolume(mGlobalVolume * aTkv.mVolume);
}
t = end;
uint32_t outputChannels;
// Use the number of channels the driver expects: this is the number of
// channels that can be output by the underlying system-level audio stream.
// Fall back to something sensible if this graph is being driven by a normal
// thread (this can happen when there are no output devices, etc.).
if (CurrentDriver()->AsAudioCallbackDriver()) {
outputChannels =
CurrentDriver()->AsAudioCallbackDriver()->OutputChannelCount();
} else {
outputChannels = AudioOutputChannelCount();
}
output.WriteTo(*aMixer, outputChannels, mSampleRate);
}
return ticksWritten;
}
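// Main-thread accessor for the NativeInputTrack associated with an input
// device; the track is created lazily and added to the graph on first lookup.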
ProcessedMediaTrack* MediaTrackGraphImpl::GetDeviceTrack(
CubebUtils::AudioDeviceID aID) {
MOZ_ASSERT(NS_IsMainThread());
RefPtr<NativeInputTrack>& t = mDeviceTracks.LookupOrInsertWith(
aID, [self = RefPtr<MediaTrackGraphImpl>(this), aID] {
NativeInputTrack* track = NativeInputTrack::Create(self);
LOG(LogLevel::Debug,
("Create NativeInputTrack %p for device %p", track, aID));
return do_AddRef(track);
});
return t.get();
}
void MediaTrackGraphImpl::OpenAudioInputImpl(CubebUtils::AudioDeviceID aID,
AudioDataListener* aListener,
NativeInputTrack* aInputTrack) {
MOZ_ASSERT(OnGraphThread());
LOG(LogLevel::Debug,
("%p OpenAudioInputImpl: NativeInputTrack %p for device %p", this,
aInputTrack, aID));
if (mDeviceTrackMap.Count() > 0 && !mDeviceTrackMap.Get(aID, nullptr)) {
// We don't support opening multiple input devices in a graph for now.
LOG(LogLevel::Debug, ("%p Device %p is not native device. Cannot open %p!",
this, aID, aInputTrack));
return;
}
LOG(LogLevel::Debug,
("%p Device %p is native device. Open %p", this, aID, aInputTrack));
// Only allow one device per MTG (hence, per document), but allow opening a
// device multiple times.
NativeInputTrack* track = mDeviceTrackMap.LookupOrInsertWith(
aID, [inputTrack = RefPtr<NativeInputTrack>(aInputTrack)] {
return inputTrack.get();
});
MOZ_ASSERT(track);
nsTArray<RefPtr<AudioDataListener>>& listeners = track->mDataUsers;
MOZ_ASSERT(!listeners.Contains(aListener), "Don't add a listener twice.");
listeners.AppendElement(aListener);
if (listeners.Length() == 1) { // first open for this device
mInputDeviceID = aID;
// Switch Drivers since we're adding input (to input-only or full-duplex)
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, AudioOutputChannelCount(),
AudioInputChannelCount(), mOutputDeviceID, mInputDeviceID,
AudioInputDevicePreference());
LOG(LogLevel::Debug,
("%p OpenAudioInput: starting new AudioCallbackDriver(input) %p", this,
driver));
SwitchAtNextIteration(driver);
}
}
nsresult MediaTrackGraphImpl::OpenAudioInput(CubebUtils::AudioDeviceID aID,
AudioDataListener* aListener) {
MOZ_ASSERT(NS_IsMainThread());
class Message : public ControlMessage {
public:
Message(MediaTrackGraphImpl* aGraph, CubebUtils::AudioDeviceID aID,
AudioDataListener* aListener, NativeInputTrack* aInputTrack)
: ControlMessage(nullptr),
mGraph(aGraph),
mID(aID),
mListener(aListener),
mInputTrack(aInputTrack) {}
void Run() override {
TRACE("MTG::OpenAudioInputImpl ControlMessage");
mGraph->OpenAudioInputImpl(mID, mListener, mInputTrack);
}
MediaTrackGraphImpl* mGraph;
CubebUtils::AudioDeviceID mID;
RefPtr<AudioDataListener> mListener;
NativeInputTrack* mInputTrack;
};
auto result = mDeviceTracks.Lookup(aID);
MOZ_ASSERT(result);
MOZ_ASSERT(result.Data());
size_t users = result.Data()->AddUser();
LOG(LogLevel::Debug,
("%p OpenInput: NativeInputTrack %p for device %p has %zu users now",
this, result.Data().get(), aID, users));
// XXX Check not destroyed!
this->AppendMessage(
MakeUnique<Message>(this, aID, aListener, result.Data().get()));
return NS_OK;
}
void MediaTrackGraphImpl::CloseAudioInputImpl(CubebUtils::AudioDeviceID aID,
AudioDataListener* aListener,
NativeInputTrack* aInputTrack) {
MOZ_ASSERT(OnGraphThread());
LOG(LogLevel::Debug,
("%p CloseAudioInputImpl: NativeInputTrack %p for device %p", this,
aInputTrack, aID));
auto result = mDeviceTrackMap.Lookup(aID);
if (!result) {
LOG(LogLevel::Debug,
("%p Device %p is not native device. Do nothing for %p", this, aID,
aInputTrack));
return;
}
LOG(LogLevel::Debug,
("%p Device %p is native device. Close %p", this, aID, aInputTrack));
NativeInputTrack* track = result.Data();
MOZ_ASSERT(track == aInputTrack);
nsTArray<RefPtr<AudioDataListener>>& listeners = track->mDataUsers;
bool wasPresent = listeners.RemoveElement(aListener);
MOZ_ASSERT(wasPresent);
if (wasPresent) {
aListener->NotifyInputStopped(this);
}
// Breaks the cycle between the MTG and the listener.
aListener->Disconnect(this);
if (!listeners.IsEmpty()) {
LOG(LogLevel::Debug,
("%p NativeInputTrack %p for device %p still has consumer", this, track,
aID));
return;
}
LOG(LogLevel::Debug,
("%p NativeInputTrack %p for device %p has no consumer now", this, track,
aID));
mInputDeviceID = nullptr; // reset to default
bool r = mDeviceTrackMap.Remove(aID);
MOZ_ASSERT(r);
Unused << r;
// Switch Drivers since we're adding or removing an input (to nothing/system
// or output only)
bool audioTrackPresent = AudioTrackPresent();
GraphDriver* driver;
if (audioTrackPresent) {
// We still have audio output
LOG(LogLevel::Debug,
("%p: CloseInput: output present (AudioCallback)", this));
driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, AudioOutputChannelCount(),
AudioInputChannelCount(), mOutputDeviceID, mInputDeviceID,
AudioInputDevicePreference());
SwitchAtNextIteration(driver);
} else if (CurrentDriver()->AsAudioCallbackDriver()) {
LOG(LogLevel::Debug,
("%p: CloseInput: no output present (SystemClockCallback)", this));
driver = new SystemClockDriver(this, CurrentDriver(), mSampleRate);
SwitchAtNextIteration(driver);
} // else SystemClockDriver->SystemClockDriver, no switch
}
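// Record that aTrack should be played out to the audio output under aKey, and
// make sure the graph is (or is about to be) driven by an AudioCallbackDriver.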
void MediaTrackGraphImpl::RegisterAudioOutput(MediaTrack* aTrack, void* aKey) {
MOZ_ASSERT(OnGraphThread());
TrackKeyAndVolume* tkv = mAudioOutputs.AppendElement();
tkv->mTrack = aTrack;
tkv->mKey = aKey;
tkv->mVolume = 1.0;
if (!CurrentDriver()->AsAudioCallbackDriver() && !Switching()) {
AudioCallbackDriver* driver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, AudioOutputChannelCount(),
AudioInputChannelCount(), mOutputDeviceID, mInputDeviceID,
AudioInputDevicePreference());
SwitchAtNextIteration(driver);
}
}
void MediaTrackGraphImpl::UnregisterAllAudioOutputs(MediaTrack* aTrack) {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
mAudioOutputs.RemoveElementsBy([aTrack](const TrackKeyAndVolume& aTkv) {
return aTkv.mTrack == aTrack;
});
}
void MediaTrackGraphImpl::UnregisterAudioOutput(MediaTrack* aTrack,
void* aKey) {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
mAudioOutputs.RemoveElementsBy(
[&aKey, &aTrack](const TrackKeyAndVolume& aTkv) {
return aTkv.mKey == aKey && aTkv.mTrack == aTrack;
});
}
void MediaTrackGraphImpl::CloseAudioInput(CubebUtils::AudioDeviceID aID,
AudioDataListener* aListener) {
MOZ_ASSERT(NS_IsMainThread());
class Message : public ControlMessage {
public:
Message(MediaTrackGraphImpl* aGraph, CubebUtils::AudioDeviceID aID,
AudioDataListener* aListener, NativeInputTrack* aInputTrack)
: ControlMessage(nullptr),
mGraph(aGraph),
mID(aID),
mListener(aListener),
mInputTrack(aInputTrack) {}
void Run() override {
TRACE("MTG::CloseAudioInputImpl ControlMessage");
mGraph->CloseAudioInputImpl(mID, mListener, mInputTrack);
}
MediaTrackGraphImpl* mGraph;
CubebUtils::AudioDeviceID mID;
RefPtr<AudioDataListener> mListener;
NativeInputTrack* mInputTrack;
};
auto result = mDeviceTracks.Lookup(aID);
MOZ_ASSERT(result);
MOZ_ASSERT(result.Data());
size_t users = result.Data()->RemoveUser();
LOG(LogLevel::Debug,
("%p: CloseInput: NativeInputTrack %p for device %p has %zu users now",
this, result.Data().get(), aID, users));
this->AppendMessage(
MakeUnique<Message>(this, aID, aListener, result.Data().get()));
// Remove the NativeInputTrack from mDeviceTracks if no AudioInputTrack needs
// it, so NativeInputTrack::Create can create a new NativeInputTrack when it's
// called for the same aID. The paired value in mDeviceTrackMap will be
// removed later in CloseAudioInputImpl. The NativeInputTrack will still be
// alive after it's removed from mDeviceTracks, since AddTrack, called via
// NativeInputTrack::Create, NS_ADDREFs it, and it stays alive until the
// matching NS_RELEASE is triggered via NativeInputTrack::DestroyImpl().
// Note that NativeInputTrack::Destroy() must be called after the above
// message is appended so that NativeInputTrack::DestroyImpl() runs after
// CloseAudioInputImpl(). Therefore, the NativeInputTrack is still alive when
// it's removed from mDeviceTrackMap in CloseAudioInputImpl().
if (users == 0) {
LOG(LogLevel::Debug,
("%p: CloseInput: NativeInputTrack %p for device %p is removed from "
"mDeviceTracks",
this, result.Data().get(), aID));
result.Data()->Destroy();
bool r = mDeviceTracks.Remove(aID);
MOZ_ASSERT(r);
Unused << r;
}
}
// All AudioInput listeners get the same speaker data (at least for now).
void MediaTrackGraphImpl::NotifyOutputData(AudioDataValue* aBuffer,
size_t aFrames, TrackRate aRate,
uint32_t aChannels) {
#ifdef ANDROID
// On Android, mInputDeviceID is always null and represents the default
// device.
// The absence of an input consumer is enough to know we need to bail out
// here.
if (!mDeviceTrackMap.Contains(mInputDeviceID)) {
return;
}
#else
if (!mInputDeviceID) {
return;
}
#endif
// When/if we decide to support multiple input devices per graph, this needs
// to loop over them.
auto result = mDeviceTrackMap.Lookup(mInputDeviceID);
MOZ_ASSERT(result);
NativeInputTrack* track = result.Data();
MOZ_ASSERT(track);
track->NotifyOutputData(this, aBuffer, aFrames, aRate, aChannels);
}
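// Relay an input-stopped notification to the NativeInputTrack of the current
// input device, if any.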
void MediaTrackGraphImpl::NotifyInputStopped() {
#ifdef ANDROID
if (!mDeviceTrackMap.Contains(mInputDeviceID)) {
return;
}
#else
if (!mInputDeviceID) {
return;
}
#endif
auto result = mDeviceTrackMap.Lookup(mInputDeviceID);
MOZ_ASSERT(result);
NativeInputTrack* track = result.Data();
MOZ_ASSERT(track);
track->NotifyInputStopped(this);
}
void MediaTrackGraphImpl::NotifyInputData(const AudioDataValue* aBuffer,
size_t aFrames, TrackRate aRate,
uint32_t aChannels,
uint32_t aAlreadyBuffered) {
#ifdef ANDROID
if (!mDeviceTrackMap.Contains(mInputDeviceID)) {
return;
}
#else
// Either we have an audio input device, or we just removed the audio input
// this iteration, and we're switching back to an output-only driver next
// iteration.
MOZ_ASSERT(mInputDeviceID || Switching());
if (!mInputDeviceID) {
return;
}
#endif
auto result = mDeviceTrackMap.Lookup(mInputDeviceID);
MOZ_ASSERT(result);
NativeInputTrack* track = result.Data();
MOZ_ASSERT(track);
track->NotifyInputData(this, aBuffer, aFrames, aRate, aChannels,
aAlreadyBuffered);
}
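// Graph-thread part of DeviceChanged(): forward the device-changed
// notification to the NativeInputTrack of the current input device, if any.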
void MediaTrackGraphImpl::DeviceChangedImpl() {
MOZ_ASSERT(OnGraphThread());
#ifdef ANDROID
if (!mDeviceTrackMap.Contains(mInputDeviceID)) {
return;
}
#else
if (!mInputDeviceID) {
return;
}
#endif
auto result = mDeviceTrackMap.Lookup(mInputDeviceID);
MOZ_ASSERT(result);
NativeInputTrack* track = result.Data();
MOZ_ASSERT(track);
track->DeviceChanged(this);
}
void MediaTrackGraphImpl::SetMaxOutputChannelCount(uint32_t aMaxChannelCount) {
MOZ_ASSERT(OnGraphThread());
mMaxOutputChannelCount = aMaxChannelCount;
}
void MediaTrackGraphImpl::DeviceChanged() {
// This is safe to call from any thread: this message comes from an
// underlying platform API, and we don't have many guarantees about the
// calling thread. If it is not called from the main thread (and it usually
// won't be), it posts itself to the main thread, and the actual device-change
// message is then run and acted upon on the graph thread.
if (!NS_IsMainThread()) {
RefPtr<nsIRunnable> runnable = WrapRunnable(
RefPtr<MediaTrackGraphImpl>(this), &MediaTrackGraphImpl::DeviceChanged);
mMainThread->Dispatch(runnable.forget());
return;
}
class Message : public ControlMessage {
public:
explicit Message(MediaTrackGraph* aGraph)
: ControlMessage(nullptr),
mGraphImpl(static_cast<MediaTrackGraphImpl*>(aGraph)) {}
void Run() override {
TRACE("MTG::DeviceChangeImpl ControlMessage");
mGraphImpl->DeviceChangedImpl();
}
// We know that this is valid, because the graph can't shut down if it has
// messages.
MediaTrackGraphImpl* mGraphImpl;
};
if (mMainThreadTrackCount == 0 && mMainThreadPortCount == 0) {
// This is a special case where the origin of this event cannot control the
// lifetime of the graph, because the graph is controlling the lifetime of
// the AudioCallbackDriver where the event originated.
// We know the graph is soon going away, so there's no need to notify about
// this device change.
return;
}
// Reset the latency, it will get fetched again next time it's queried.
MOZ_ASSERT(NS_IsMainThread());
mAudioOutputLatency = 0.0;
// Dispatch to the bg thread to do the (potentially expensive) query of the
// maximum channel count, and then dispatch back to the main thread, then to
// the graph, with the new info.
RefPtr<MediaTrackGraphImpl> self = this;
NS_DispatchBackgroundTask(NS_NewRunnableFunction(
"MaxChannelCountUpdateOnBgThread", [self{move(self)}]() {
uint32_t maxChannelCount = CubebUtils::MaxNumberOfChannels();
self->Dispatch(NS_NewRunnableFunction(
"MaxChannelCountUpdateToMainThread",
[self{self}, maxChannelCount]() {
class MessageToGraph : public ControlMessage {
public:
explicit MessageToGraph(MediaTrackGraph* aGraph,
uint32_t aMaxChannelCount)
: ControlMessage(nullptr),
mGraphImpl(static_cast<MediaTrackGraphImpl*>(aGraph)),
mMaxChannelCount(aMaxChannelCount) {}
void Run() override {
TRACE("MTG::SetMaxOutputChannelCount ControlMessage")
mGraphImpl->SetMaxOutputChannelCount(mMaxChannelCount);
}
MediaTrackGraphImpl* mGraphImpl;
uint32_t mMaxChannelCount;
};
self->AppendMessage(
MakeUnique<MessageToGraph>(self, maxChannelCount));
}));
}));
AppendMessage(MakeUnique<Message>(this));
}
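// Switch to a new AudioCallbackDriver if the current driver's input channel
// count or input device preference no longer matches what the graph requires.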
void MediaTrackGraphImpl::ReevaluateInputDevice() {
MOZ_ASSERT(OnGraphThread());
bool needToSwitch = false;
if (CurrentDriver()->AsAudioCallbackDriver()) {
AudioCallbackDriver* audioCallbackDriver =
CurrentDriver()->AsAudioCallbackDriver();
if (audioCallbackDriver->InputChannelCount() != AudioInputChannelCount()) {
needToSwitch = true;
}
if (audioCallbackDriver->InputDevicePreference() !=
AudioInputDevicePreference()) {
needToSwitch = true;
}
} else {
// We're already in the process of switching to an audio callback driver,
// which will happen at the next iteration.
// However, maybe it's not the correct number of channels. Re-query the
// correct channel amount at this time.
MOZ_ASSERT(Switching());
needToSwitch = true;
}
if (needToSwitch) {
AudioCallbackDriver* newDriver = new AudioCallbackDriver(
this, CurrentDriver(), mSampleRate, AudioOutputChannelCount(),
AudioInputChannelCount(), mOutputDeviceID, mInputDeviceID,
AudioInputDevicePreference());
SwitchAtNextIteration(newDriver);
}
}
bool MediaTrackGraphImpl::OnGraphThreadOrNotRunning() const {
// either we're on the right thread (and calling CurrentDriver() is safe),
// or we're going to fail the assert anyway, so don't cross-check
// via CurrentDriver().
return mGraphDriverRunning ? OnGraphThread() : NS_IsMainThread();
}
bool MediaTrackGraphImpl::OnGraphThread() const {
// we're on the right thread (and calling mDriver is safe),
MOZ_ASSERT(mDriver);
if (mGraphRunner && mGraphRunner->OnThread()) {
return true;
}
return mDriver->OnThread();
}
bool MediaTrackGraphImpl::Destroyed() const {
MOZ_ASSERT(NS_IsMainThread());
return !mSelfRef;
}
bool MediaTrackGraphImpl::ShouldUpdateMainThread() {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
if (mRealtime) {
return true;
}
TimeStamp now = TimeStamp::Now();
// For offline graphs, update now if it has been long enough since the last
// update, or if it has reached the end.
if ((now - mLastMainThreadUpdate).ToMilliseconds() >
CurrentDriver()->IterationDuration() ||
mStateComputedTime >= mEndTime) {
mLastMainThreadUpdate = now;
return true;
}
return false;
}
void MediaTrackGraphImpl::PrepareUpdatesToMainThreadState(bool aFinalUpdate) {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
mMonitor.AssertCurrentThreadOwns();
// We don't want to frequently update the main thread about timing updates
// when we are not running in realtime.
if (aFinalUpdate || ShouldUpdateMainThread()) {
// Strip updates that will be obsoleted below, so as to keep the length of
// mTrackUpdates sane.
size_t keptUpdateCount = 0;
for (size_t i = 0; i < mTrackUpdates.Length(); ++i) {
MediaTrack* track = mTrackUpdates[i].mTrack;
// RemoveTrackGraphThread() clears mTrack in updates for
// tracks that are removed from the graph.
MOZ_ASSERT(!track || track->GraphImpl() == this);
if (!track || track->MainThreadNeedsUpdates()) {
// Discard this update as it has either been cleared when the track
// was destroyed or there will be a newer update below.
continue;
}
if (keptUpdateCount != i) {
mTrackUpdates[keptUpdateCount] = move(mTrackUpdates[i]);
MOZ_ASSERT(!mTrackUpdates[i].mTrack);
}
++keptUpdateCount;
}
mTrackUpdates.TruncateLength(keptUpdateCount);
mTrackUpdates.SetCapacity(mTrackUpdates.Length() + mTracks.Length() +
mSuspendedTracks.Length());
for (MediaTrack* track : AllTracks()) {
if (!track->MainThreadNeedsUpdates()) {
continue;
}
TrackUpdate* update = mTrackUpdates.AppendElement();
update->mTrack = track;
// No blocking to worry about here, since we've passed
// UpdateCurrentTimeForTracks.
update->mNextMainThreadCurrentTime =
track->GraphTimeToTrackTime(mProcessedTime);
update->mNextMainThreadEnded = track->mNotifiedEnded;
}
mNextMainThreadGraphTime = mProcessedTime;
if (!mPendingUpdateRunnables.IsEmpty()) {
mUpdateRunnables.AppendElements(move(mPendingUpdateRunnables));
}
}
// If this is the final update, then a stable state event will soon be
// posted just before this thread finishes, and so there is no need to also
// post here.
if (!aFinalUpdate &&
// Don't send the message to the main thread if it's not going to have
// any work to do.
!(mUpdateRunnables.IsEmpty() && mTrackUpdates.IsEmpty())) {
EnsureStableStateEventPosted();
}
}
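// Audio processing happens in blocks of WEBAUDIO_BLOCK_SIZE (128) frames;
// these helpers round graph times up to block boundaries.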
GraphTime MediaTrackGraphImpl::RoundUpToEndOfAudioBlock(GraphTime aTime) {
if (aTime % WEBAUDIO_BLOCK_SIZE == 0) {
return aTime;
}
return RoundUpToNextAudioBlock(aTime);
}
GraphTime MediaTrackGraphImpl::RoundUpToNextAudioBlock(GraphTime aTime) {
uint64_t block = aTime >> WEBAUDIO_BLOCK_SIZE_BITS;
uint64_t nextBlock = block + 1;
GraphTime nextTime = nextBlock << WEBAUDIO_BLOCK_SIZE_BITS;
return nextTime;
}
void MediaTrackGraphImpl::ProduceDataForTracksBlockByBlock(
uint32_t aTrackIndex, TrackRate aSampleRate) {
MOZ_ASSERT(OnGraphThread());
MOZ_ASSERT(aTrackIndex <= mFirstCycleBreaker,
"Cycle breaker is not AudioNodeTrack?");
while (mProcessedTime < mStateComputedTime) {
// Microtask checkpoints are in between render quanta.
nsAutoMicroTask mt;
GraphTime next = RoundUpToNextAudioBlock(mProcessedTime);
for (uint32_t i = mFirstCycleBreaker; i < mTracks.Length(); ++i) {
auto nt = static_cast<AudioNodeTrack*>(mTracks[i]);
MOZ_ASSERT(nt->AsAudioNodeTrack());
nt->ProduceOutputBeforeInput(mProcessedTime);
}
for (uint32_t i = aTrackIndex; i < mTracks.Length(); ++i) {
ProcessedMediaTrack* pt = mTracks[i]->AsProcessedTrack();
if (pt) {
if (pt->AsNativeInputTrack()) {
// NativeInputTracks are processed in Process. Skip them.
continue;
}
pt->ProcessInput(
mProcessedTime, next,
(next == mStateComputedTime) ? ProcessedMediaTrack::ALLOW_END : 0);
}
}
mProcessedTime = next;
}
NS_ASSERTION(mProcessedTime == mStateComputedTime,
"Something went wrong with rounding to block boundaries");
}
void MediaTrackGraphImpl::RunMessageAfterProcessing(
UniquePtr<ControlMessage> aMessage) {
MOZ_ASSERT(OnGraphThread());
if (mFrontMessageQueue.IsEmpty()) {
mFrontMessageQueue.AppendElement();
}
// Only one block is used for messages from the graph thread.
MOZ_ASSERT(mFrontMessageQueue.Length() == 1);
mFrontMessageQueue[0].mMessages.AppendElement(move(aMessage));
}
void MediaTrackGraphImpl::RunMessagesInQueue() {
TRACE("MTG::RunMessagesInQueue");
MOZ_ASSERT(OnGraphThread());
// Calculate independent action times for each batch of messages (each
// batch corresponding to an event loop task). This isolates the performance
// of different scripts to some extent.
for (uint32_t i = 0; i < mFrontMessageQueue.Length(); ++i) {
nsTArray<UniquePtr<ControlMessage>>& messages =
mFrontMessageQueue[i].mMessages;
for (uint32_t j = 0; j < messages.Length(); ++j) {
TRACE("ControlMessage::Run");
messages[j]->Run();
}
}
mFrontMessageQueue.Clear();
}
void MediaTrackGraphImpl::UpdateGraph(GraphTime aEndBlockingDecisions) {
TRACE("MTG::UpdateGraph");
MOZ_ASSERT(OnGraphThread());
MOZ_ASSERT(aEndBlockingDecisions >= mProcessedTime);
// The next state computed time can be the same as the previous one: it
// means the driver would have been blocking indefinitely, but the graph has
// been woken up right after having gone to sleep.
MOZ_ASSERT(aEndBlockingDecisions >= mStateComputedTime);
CheckDriver();
UpdateTrackOrder();
// Always do another iteration if there are tracks waiting to resume.
bool ensureNextIteration = !mPendingResumeOperations.IsEmpty();
for (MediaTrack* track : mTracks) {
if (SourceMediaTrack* is = track->AsSourceTrack()) {
ensureNextIteration |= is->PullNewData(aEndBlockingDecisions);
is->ExtractPendingInput(mStateComputedTime, aEndBlockingDecisions);
}
if (track->mEnded) {
// The track's not suspended, and since it's ended, underruns won't
// stop it playing out. So there's no blocking other than what we impose
// here.
GraphTime endTime = track->GetEnd() + track->mStartTime;
if (endTime <= mStateComputedTime) {
LOG(LogLevel::Verbose,
("%p: MediaTrack %p is blocked due to being ended", this, track));
track->mStartBlocking = mStateComputedTime;
} else {
LOG(LogLevel::Verbose,
("%p: MediaTrack %p has ended, but is not blocked yet (current "
"time %f, end at %f)",
this, track, MediaTimeToSeconds(mStateComputedTime),
MediaTimeToSeconds(endTime)));
// Data can't be added to an ended track, so underruns are irrelevant.
MOZ_ASSERT(endTime <= aEndBlockingDecisions);
track->mStartBlocking = endTime;
}
} else {
track->mStartBlocking = WillUnderrun(track, aEndBlockingDecisions);
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
if (SourceMediaTrack* s = track->AsSourceTrack()) {
if (s->Ended()) {
continue;
}
{
MutexAutoLock lock(s->mMutex);
if (!s->mUpdateTrack->mPullingEnabled) {
// The invariant that data must be provided is only enforced when
// pulling.
continue;
}
}
if (track->GetEnd() <
track->GraphTimeToTrackTime(aEndBlockingDecisions)) {
LOG(LogLevel::Error,
("%p: SourceMediaTrack %p (%s) is live and pulled, "
"but wasn't fed "
"enough data. TrackListeners=%zu. Track-end=%f, "
"Iteration-end=%f",
this, track,
(track->mType == MediaSegment::AUDIO ? "audio" : "video"),
track->mTrackListeners.Length(),
MediaTimeToSeconds(track->GetEnd()),
MediaTimeToSeconds(
track->GraphTimeToTrackTime(aEndBlockingDecisions))));
MOZ_DIAGNOSTIC_ASSERT(false,
"A non-ended SourceMediaTrack wasn't fed "
"enough data by NotifyPull");
}
}
#endif /* MOZ_DIAGNOSTIC_ASSERT_ENABLED */
}
}
for (MediaTrack* track : mSuspendedTracks) {
track->mStartBlocking = mStateComputedTime;
}
// If the loop is woken up so soon that IterationEnd() barely advances or
// if an offline graph is not currently rendering, we end up having
// aEndBlockingDecisions == mStateComputedTime.
// Since the process interval [mStateComputedTime, aEndBlockingDecision) is
// empty, Process() will not find any unblocked track and so will not
// ensure