/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// We want our default stack size limit to be approximately 2MB, to be safe for
// JS helper tasks that can use a lot of stack, but expect most threads to use
// much less. On Linux, however, requesting a stack of 2MB or larger risks the
// kernel allocating an entire 2MB huge page for it on first access, which we do
// not want. To avoid this possibility, we subtract 2 standard VM page sizes
// from our default.
constexpr uint32_t kBaseStackSize = 2048 * 1024 - 2 * 4096;

// TSan enforces a minimum stack size that's just slightly larger than our
// default helper stack size. It does this to store blobs of TSan-specific data
// on each thread's stack. Unfortunately, that means that even though we'll
// actually receive a larger stack than we requested, the effective usable space
// of that stack is significantly less than what we expect. To offset TSan
// stealing our stack space from underneath us, double the default.
//
// Similarly, ASan requires more stack space due to red-zones.
#if defined(MOZ_TSAN) || defined(MOZ_ASAN)
constexpr uint32_t kStackSize = 2 * kBaseStackSize;
#else
constexpr uint32_t kStackSize = kBaseStackSize;
#endif
// Effective priority the pool thread is running its current task at (it is
// compared against other tasks' priorities when choosing a task to
// interrupt). This may be higher than mCurrentTask's priority due to priority
// propagation. This is -only- valid when mCurrentTask != nullptr.
uint32_t mEffectiveTaskPriority = 0;
// This struct is duplicated below as 'IncompleteTaskMarker'. // Make sure you keep the two in sync. // The only difference between the two schemas is the type of the "task" field: // TaskMarker uses TerminatingFlow and IncompleteTaskMarker uses Flow. // We have two schemas so that we don't need to emit a separate marker for the // TerminatingFlow in the common case. struct TaskMarker : BaseMarkerType<TaskMarker> { static constexpr constchar* Name = "Task"; static constexpr constchar* Description = "Marker representing a task being executed in TaskController.";
// This is a duplicate of the code above with the format of the 'task' // field changed from `TerminatingFlow` to Flow` struct IncompleteTaskMarker : BaseMarkerType<IncompleteTaskMarker> { static constexpr constchar* Name = "Task"; static constexpr constchar* Description = "Marker representing a task being executed in TaskController.";
// Wrap task->Run() so that we can add markers for it
Task::TaskResult TaskController::RunTask(Task* aTask) { if (!profiler_is_collecting_markers()) { return aTask->Run();
}
if (mCurrentPriorityModifier != oldModifier) { returntrue;
}
} returnfalse;
}
#ifdef MOZ_COLLECTING_RUNNABLE_TELEMETRY
// RAII helper that publishes the name of the currently-running main-thread
// runnable into nsThread::sMainThreadRunnableName so that BHR (Background
// Hang Reporter) can record it. Saves the previous name on construction;
// the matching restore happens in the destructor (not visible in this chunk).
class MOZ_RAII AutoSetMainThreadRunnableName {
 public:
  explicit AutoSetMainThreadRunnableName(const nsCString& aName) {
    MOZ_ASSERT(NS_IsMainThread());
    // We want to record our current runnable's name in a static so
    // that BHR can record it.
    mRestoreRunnableName = nsThread::sMainThreadRunnableName;

    // Copy the name into sMainThreadRunnableName's buffer, and append a
    // terminating null. The copy is truncated to the fixed buffer size,
    // leaving room for the terminator.
    uint32_t length = std::min((uint32_t)nsThread::kRunnableNameBufSize - 1,
                               (uint32_t)aName.Length());
    memcpy(nsThread::sMainThreadRunnableName.begin(), aName.BeginReading(),
           length);
    nsThread::sMainThreadRunnableName[length] = '\0';
  }
while (!currentTask->mDependencies.empty()) { auto iter = currentTask->mDependencies.begin();
while (iter != currentTask->mDependencies.end()) { if ((*iter)->mCompleted) { auto oldIter = iter;
iter++; // Completed tasks are removed here to prevent needlessly keeping them // alive or iterating over them in the future.
currentTask->mDependencies.erase(oldIter); continue;
}
currentTask = iter->get(); break;
}
}
return currentTask == this ? nullptr : currentTask;
}
#ifdef MOZ_MEMORY // We choose to not disable lazy purge on our shutdown as this might do a // useless sync purge of all arenas during process shutdown. // Note that we already stopped scheduling new idle purges after // ShutdownPhase::AppShutdownConfirmed, so most likely it's already gone. if (sIdleMemoryCleanupRunner) {
sIdleMemoryCleanupRunner->Cancel();
sIdleMemoryCleanupRunner = nullptr;
} #endif
}
// Shuts down the worker pool: flags shutdown under the graph lock, wakes all
// pool threads, then joins them.
void TaskController::ShutdownThreadPoolInternal() {
  {
    // Prevent race condition on mShuttingDown and wait.
    MutexAutoLock lock(mGraphMutex);
    mShuttingDown = true;
    // Wake every pool thread so it can observe mShuttingDown.
    for (auto& thread : mPoolThreads) {
      thread->mThreadCV.NotifyAll();
    }
  }
  // NOTE(review): joins intentionally happen outside the lock scope above —
  // presumably so exiting threads can still acquire mGraphMutex; confirm
  // against the pool thread loop.
  for (auto& thread : mPoolThreads) {
    PR_JoinThread(thread->mThread);
  }
}
if (!taskCompleted) { // Presumably this task was interrupted, leave its dependencies // unresolved and reinsert into the queue. auto insertion = mThreadableTasks.insert(aThread->mCurrentTask);
MOZ_ASSERT(insertion.second);
task->mIterator = insertion.first;
} else {
task->mCompleted = true; #ifdef DEBUG
task->mIsInGraph = false; #endif
task->mDependencies.clear(); // This may have unblocked a main thread task. We could do this only // if there was a main thread task before this one in the dependency // chain.
mMayHaveMainThreadTask = true; // Since this could have multiple dependencies thare are restricted // to the main thread. Let's make sure that's awake.
EnsureMainThreadTasksScheduled();
// Clear the current task to mark ourselves idle.
RefPtr<Task> lastTask = aThread->mCurrentTask.forget();
mIdleThreadCount++;
MOZ_ASSERT(mIdleThreadCount <= mPoolThreads.size());
// Dispatch any other tasks that depended on this one.
DispatchThreadableTasks(lock);
// Ensure the last task is released before we enter the wait state. This // happens outside the lock. This is required since it's perfectly feasible // for task destructors to post events themselves.
{
MutexAutoUnlock unlock(mGraphMutex);
lastTask = nullptr;
}
}
if (task->GetKind() == Task::Kind::OffMainThreadOnly) {
MutexAutoLock lock(mPoolInitializationMutex); if (!mThreadPoolInitialized) {
InitializeThreadPool();
}
}
MutexAutoLock lock(mGraphMutex);
if (TaskManager* manager = task->GetManager()) { if (manager->mTaskCount == 0) {
mTaskManagers.insert(manager);
}
manager->DidQueueTask();
// Set this here since if this manager's priority modifier doesn't change // we will not reprioritize when iterating over the queue.
task->mPriorityModifier = manager->mCurrentPriorityModifier;
}
if (profiler_is_active_and_unpaused()) {
task->mInsertionTime = TimeStamp::Now();
}
// Search for the highest priority dependency of the highest priority task. for (const RefPtr<Task>& rootTask : mThreadableTasks) {
MOZ_ASSERT(!rootTask->mTaskManager);
for (;;) { // We only ever process one event here. However we may sometimes // not actually process a real event because of suspended tasks. // This loop allows us to wait until we've processed something // in that scenario.
if (mMTTaskRunnableProcessedTask || !aMayWait) { break;
}
#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR // Unlock before calling into the BackgroundHangMonitor API as it uses // the timer API.
{
MutexAutoUnlock unlock(mGraphMutex);
BackgroundHangMonitor().NotifyWait();
} #endif
{ // ProcessNextEvent will also have attempted to wait, however we may have // given it a Runnable when all the tasks in our task graph were suspended // but we weren't able to cheaply determine that.
AUTO_PROFILER_LABEL("TaskController::ProcessPendingMTTask", IDLE);
mMainThreadCV.Wait();
}
bool TaskController::HasMainThreadPendingTasks() {
MOZ_ASSERT(NS_IsMainThread()); auto resetIdleState = MakeScopeExit([&idleManager = mIdleTaskManager] { if (idleManager) {
idleManager->State().ClearCachedIdleDeadline();
}
});
for (bool considerIdle : {false, true}) { if (considerIdle && !mIdleTaskManager) { continue;
}
MutexAutoLock lock(mGraphMutex);
if (considerIdle) {
mIdleTaskManager->State().ForgetPendingTaskGuarantee(); // Temporarily unlock so we can peek our idle deadline. // XXX We could do this _before_ we take the lock if the API would let us. // We do want to do this before looking at mMainThreadTasks, in case // someone adds one while we're unlocked.
{
MutexAutoUnlock unlock(mGraphMutex);
mIdleTaskManager->State().CachePeekedIdleDeadline(unlock);
}
}
// Return early if there's no tasks at all. if (mMainThreadTasks.empty()) { returnfalse;
}
// We can cheaply count how many tasks are suspended.
uint64_t totalSuspended = 0; for (TaskManager* manager : mTaskManagers) {
DebugOnly<bool> modifierChanged =
manager
->UpdateCachesForCurrentIterationAndReportPriorityModifierChanged(
lock, TaskManager::IterationType::NOT_EVENT_LOOP_TURN);
MOZ_ASSERT(!modifierChanged);
// The idle manager should be suspended unless we're doing the idle pass.
MOZ_ASSERT(manager != mIdleTaskManager || manager->mCurrentSuspended ||
considerIdle, "Why are idle tasks not suspended here?");
if (manager->mCurrentSuspended) { // XXX - If managers manage off-main-thread tasks this breaks! This // scenario is explicitly not supported. // // This is only incremented inside the lock -or- decremented on the main // thread so this is safe.
totalSuspended += manager->mTaskCount;
}
}
// This would break down if we have a non-suspended task depending on a // suspended task. This is why for the moment we do not allow tasks // to be dependent on tasks managed by another taskmanager. if (mMainThreadTasks.size() > totalSuspended) { // If mIdleTaskManager->mTaskCount is 0, we never updated the suspended // state of mIdleTaskManager above, hence shouldn't even check it here. // But in that case idle tasks are not contributing to our suspended task // count anyway. if (mIdleTaskManager && mIdleTaskManager->mTaskCount &&
!mIdleTaskManager->mCurrentSuspended) {
MOZ_ASSERT(considerIdle, "Why is mIdleTaskManager not suspended?"); // Check whether the idle tasks were really needed to make our "we have // an unsuspended task" decision. If they were, we need to force-enable // idle tasks until we run our next task. if (mMainThreadTasks.size() - mIdleTaskManager->mTaskCount <=
totalSuspended) {
mIdleTaskManager->State().EnforcePendingTaskGuarantee();
}
} returntrue;
}
} returnfalse;
}
void TaskController::MayScheduleIdleMemoryCleanup() { // We want to schedule an idle task only if we: // - know to be about to become idle // - are not shutting down // - have not yet an active IdleTaskRunner // - have something to cleanup if (PendingMainthreadTaskCountIncludingSuspended() > 0) { // This is a hot code path for the main thread, so please do not add // logic here or before. return;
} if (!mIsLazyPurgeEnabled) { return;
} if (AppShutdown::IsInOrBeyond(ShutdownPhase::AppShutdownConfirmed)) { if (sIdleMemoryCleanupRunner) {
sIdleMemoryCleanupRunner->Cancel();
sIdleMemoryCleanupRunner = nullptr;
} return;
}
if (!moz_may_purge_one_now(/* aPeekOnly */ true)) { // Currently we unqueue purge requests only if we run moz_may_purge_one_now // with aPeekOnly==false and that happens in the below IdleTaskRunner which // cancels itself when done (and all of this happens on the main thread // without possible races) OR if something else causes a MayPurgeAll (like // jemalloc_free_(excess)_dirty_pages or moz_set_max_dirty_page_modifier) // which can happen anytime (and even from other threads). if (sIdleMemoryCleanupRunner) {
sIdleMemoryCleanupRunner->Cancel();
sIdleMemoryCleanupRunner = nullptr;
} return;
} if (sIdleMemoryCleanupRunner) { return;
}
// Only create a marker if we really do something.
PROFILER_MARKER_TEXT("MayScheduleIdleMemoryCleanup", OTHER, {}, "Schedule for immediate run."_ns);
sIdleMemoryCleanupRunner = IdleTaskRunner::Create(
[](TimeStamp aDeadline) { bool pending = moz_may_purge_one_now(true); if (pending) {
AUTO_PROFILER_MARKER_TEXT( "DoIdleMemoryCleanup", OTHER, {}, "moz_may_purge_one_now until there is budget."_ns); while (pending) {
pending = moz_may_purge_one_now(false); if (!aDeadline.IsNull() && TimeStamp::Now() > aDeadline) { break;
}
}
} if (!pending && sIdleMemoryCleanupRunner) {
PROFILER_MARKER_TEXT("DoIdleMemoryCleanup", OTHER, {}, "Finished all cleanup."_ns);
sIdleMemoryCleanupRunner->Cancel();
sIdleMemoryCleanupRunner = nullptr;
}
// We never get here without attempting at least one purge call. returntrue;
}, "TaskController::IdlePurgeRunner", TimeDuration::FromMilliseconds(0),
maxPurgeDelay, minPurgeBudget, true, nullptr, nullptr); // We do not pass aMayStopProcessing, which would be the only legitimate // reason to return nullptr (OOM would crash), so no fallback needed.
MOZ_ASSERT(sIdleMemoryCleanupRunner);
} #endif
// Attempts to run one main-thread task, refreshing the idle state and
// retrying once if the first attempt ran nothing. Returns whether a task ran.
// NOTE(review): the original extract was truncated; the trailing
// `return taskRan;` is restored here to match the declared bool return.
bool TaskController::ExecuteNextTaskOnlyMainThreadInternal(
    const MutexAutoLock& aProofOfLock) MOZ_REQUIRES(mGraphMutex) {
  MOZ_ASSERT(NS_IsMainThread());
  mGraphMutex.AssertCurrentThreadOwns();
  // Block to make it easier to jump to our cleanup.
  bool taskRan = false;
  do {
    taskRan = DoExecuteNextTaskOnlyMainThreadInternal(aProofOfLock);
    if (taskRan) {
      if (mIdleTaskManager && mIdleTaskManager->mTaskCount &&
          mIdleTaskManager->IsSuspended(aProofOfLock)) {
        uint32_t activeTasks = mMainThreadTasks.size();
        for (TaskManager* manager : mTaskManagers) {
          if (manager->IsSuspended(aProofOfLock)) {
            activeTasks -= manager->mTaskCount;
          } else {
            break;
          }
        }

        if (!activeTasks) {
          // We have only idle (and maybe other suspended) tasks left, so need
          // to update the idle state. We need to temporarily release the lock
          // while we do that.
          MutexAutoUnlock unlock(mGraphMutex);
          mIdleTaskManager->State().RequestIdleDeadlineIfNeeded(unlock);
        }
      }
      break;
    }

    if (!mIdleTaskManager) {
      break;
    }

    if (mIdleTaskManager->mTaskCount) {
      // We have idle tasks that we may not have gotten above because
      // our idle state is not up to date. We need to update the idle state
      // and try again. We need to temporarily release the lock while we do
      // that.
      MutexAutoUnlock unlock(mGraphMutex);
      mIdleTaskManager->State().UpdateCachedIdleDeadline(unlock);
    } else {
      MutexAutoUnlock unlock(mGraphMutex);
      mIdleTaskManager->State().RanOutOfTasks(unlock);
    }

    // When we unlocked, someone may have queued a new task on us. So try to
    // see whether we can run things again.
    taskRan = DoExecuteNextTaskOnlyMainThreadInternal(aProofOfLock);
  } while (false);

  if (mIdleTaskManager) {
    // The pending task guarantee is not needed anymore, since we just tried
    // running a task
    mIdleTaskManager->State().ForgetPendingTaskGuarantee();

    if (mMainThreadTasks.empty()) {
      ++mRunOutOfMTTasksCounter;

      // XXX the IdlePeriodState API demands we have a MutexAutoUnlock for it.
      // Otherwise we could perhaps just do this after we exit the locked
      // block, by pushing the lock down into this method. Though it's not
      // clear that we could check mMainThreadTasks.size() once we unlock, and
      // whether we could maybe substitute mMayHaveMainThreadTask for that
      // check.
      MutexAutoUnlock unlock(mGraphMutex);
      mIdleTaskManager->State().RanOutOfTasks(unlock);
    }
  }
  return taskRan;
}
// This would break down if we have a non-suspended task depending on a // suspended task. This is why for the moment we do not allow tasks // to be dependent on tasks managed by another taskmanager. if (mMainThreadTasks.size() > totalSuspended) { for (auto iter = mMainThreadTasks.begin(); iter != mMainThreadTasks.end();
iter++) {
Task* task = iter->get();
if (task->mTaskManager && task->mTaskManager->mCurrentSuspended) { // Even though we may want to run some dependencies of this task, we // will run them at their own priority level and not the priority // level of their dependents. continue;
}
{
MutexAutoUnlock unlock(mGraphMutex); if (manager) {
manager->WillRunTask(); if (manager != mIdleTaskManager) { // Notify the idle period state that we're running a non-idle task. // This needs to happen while our mutex is not locked!
mIdleTaskManager->State().FlagNotIdle();
} else {
TimeStamp idleDeadline =
mIdleTaskManager->State().GetCachedIdleDeadline();
MOZ_ASSERT(
idleDeadline, "How can we not have a deadline if our manager is enabled?");
task->SetIdleDeadline(idleDeadline);
}
} if (mIdleTaskManager) { // We found a task to run; we can clear the idle deadline on our idle // task manager. This _must_ be done before we actually run the task, // because running the task could reenter via spinning the event loop // and we want to make sure there's no cached idle deadline at that // point. But we have to make sure we do it after out SetIdleDeadline // call above, in the case when the task is actually an idle task.
mIdleTaskManager->State().ClearCachedIdleDeadline();
}
// Task itself should keep manager alive. if (manager && result && manager->mTaskCount == 0) {
mTaskManagers.erase(manager);
}
task->mInProgress = false;
if (!result) { // Presumably this task was interrupted, leave its dependencies // unresolved and reinsert into the queue. auto insertion =
mMainThreadTasks.insert(std::move(mCurrentTasksMT.top()));
MOZ_ASSERT(insertion.second);
task->mIterator = insertion.first; if (manager) {
manager->WillRunTask();
}
} else {
task->mCompleted = true; #ifdef DEBUG
task->mIsInGraph = false; #endif // Clear dependencies to release references.
task->mDependencies.clear();
// Dispatch any tasks that are now ready to run.
DispatchThreadableTasks(aProofOfLock);
}
mCurrentTasksMT.pop(); returntrue;
}
}
mMayHaveMainThreadTask = false; if (mIdleTaskManager) { // We did not find a task to run. We still need to clear the cached idle // deadline on our idle state, because that deadline was only relevant to // the execution of this function. Had we found a task, we would have // cleared the deadline before running that task.
mIdleTaskManager->State().ClearCachedIdleDeadline();
} returnfalse;
}
// This optimization prevents many slow lookups in long chains of similar // priority. if (!aTask->mDependencies.empty()) {
Task* firstDependency = aTask->mDependencies.begin()->get(); if (aTask->GetPriority() <= firstDependency->GetPriority() &&
!firstDependency->mCompleted &&
aTask->GetKind() == firstDependency->GetKind()) { // This task has the same or a higher priority as one of its dependencies, // never any need to interrupt. return;
}
}
if (finalDependency->mInProgress) { // No need to wake anything, we can't schedule this task right now anyway. return;
}
if (aTask->GetKind() == Task::Kind::MainThreadOnly) {
mMayHaveMainThreadTask = true;
EnsureMainThreadTasksScheduled();
if (mCurrentTasksMT.empty()) { return;
}
// We could go through the steps above here and interrupt an off main // thread task in case it has a lower priority. if (finalDependency->GetKind() == Task::Kind::OffMainThreadOnly) { return;
}
if (mCurrentTasksMT.top()->GetPriority() < aTask->GetPriority()) {
mCurrentTasksMT.top()->RequestInterrupt(aTask->GetPriority());
}
} else { if (mIdleThreadCount != 0) {
DispatchThreadableTasks(aProofOfLock);
// There was a free thread, no need to interrupt anything. return;
}
Task* lowestPriorityTask = nullptr; for (auto& thread : mPoolThreads) {
MOZ_ASSERT(thread->mCurrentTask); if (!lowestPriorityTask) {
lowestPriorityTask = thread->mCurrentTask.get(); continue;
}
// This should possibly select the lowest priority task which was started // the latest. But for now we ignore that optimization. // This also doesn't guarantee a task is interruptable, so that's an // avenue for improvements as well. if (lowestPriorityTask->GetPriority() > thread->mEffectiveTaskPriority) {
lowestPriorityTask = thread->mCurrentTask.get();
}
}
if (lowestPriorityTask->GetPriority() < aTask->GetPriority()) {
lowestPriorityTask->RequestInterrupt(aTask->GetPriority());
}
// We choose not to interrupt main thread tasks for tasks which may be // executed off the main thread.
}
}
std::vector<RefPtr<Task>> storedTasks; // Find all relevant tasks. for (auto iter = mMainThreadTasks.begin(); iter != mMainThreadTasks.end();) { if ((*iter)->mTaskManager == aManager) {
storedTasks.push_back(*iter);
iter = mMainThreadTasks.erase(iter);
} else {
iter++;
}
}
// Reinsert found tasks with their new priorities. for (RefPtr<Task>& ref : storedTasks) { // Kept alive at first by the vector and then by mMainThreadTasks.
Task* task = ref;
task->mPriorityModifier = modifier; auto insertion = mMainThreadTasks.insert(std::move(ref));
MOZ_ASSERT(insertion.second);
task->mIterator = insertion.first;
}
}
} // namespace mozilla
¤ Dauer der Verarbeitung: 0.23 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.