/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* vim: set ts=8 sts=2 et sw=2 tw=80: */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// NOTE: Using short-circuiting here to avoid call to GetSystemPowerStatus() // when we know that that result will not affect the final result. (As // confirmed by the static_assert's above, onBatteryPower does not affect the // result when the lowPriorityProcess is true.)
SYSTEM_POWER_STATUS status; constbool onBatteryPower = !lowPriorityProcess &&
GetSystemPowerStatus(&status) &&
(status.ACLineStatus == 0);
// Uncomment the following line to enable runtime stats during development. // #define TIMERS_RUNTIME_STATS
#ifdef TIMERS_RUNTIME_STATS // This class gathers durations and displays some basic stats when destroyed. // It is intended to be used as a static variable (see `AUTO_TIMERS_STATS` // below), to display stats at the end of the program. class StaticTimersStats { public: explicit StaticTimersStats(constchar* aName) : mName(aName) {}
// Displays the collected stats (count, total, mean, and max duration) when
// the static object is destroyed, i.e. at the end of the program.
~StaticTimersStats() {
  // Using unsigned long long for computations and printfs.
  using ULL = unsigned long long;
  ULL n = static_cast<ULL>(mCount);
  if (n == 0) {
    // Nothing was ever recorded for this stat.
    printf("[%d] Timers stats `%s`: (nothing)\n",
           int(profiler_current_process_id().ToNumber()), mName);
  } else if (ULL sumNs = static_cast<ULL>(mSumDurationsNs); sumNs == 0) {
    // Only counts were recorded (e.g., via COUNT_TIMERS_STATS), no durations.
    printf("[%d] Timers stats `%s`: %llu\n",
           int(profiler_current_process_id().ToNumber()), mName, n);
  } else {
    // Full stats: total duration, count, mean, and maximum.
    printf("[%d] Timers stats `%s`: %llu ns / %llu = %llu ns, max %llu ns\n",
           int(profiler_current_process_id().ToNumber()), mName, sumNs, n,
           sumNs / n, static_cast<ULL>(mLongestDurationNs));
  }
}
// Records the duration elapsed since aStart: adds it to the running sum,
// bumps the sample count, and updates the maximum if needed.
void AddDurationFrom(TimeStamp aStart) {
  // Duration between aStart and now, rounded to the nearest nanosecond.
  DurationNs duration = static_cast<DurationNs>(
      (TimeStamp::Now() - aStart).ToMicroseconds() * 1000 + 0.5);
  mSumDurationsNs += duration;
  ++mCount;
  // Update mLongestDurationNs if this one is longer, using a compare-exchange
  // loop so concurrent updates from other threads are not lost.
  for (;;) {
    DurationNs longest = mLongestDurationNs;
    if (MOZ_LIKELY(longest >= duration)) {
      // This duration is not the longest, nothing to do.
      break;
    }
    if (MOZ_LIKELY(mLongestDurationNs.compareExchange(longest, duration))) {
      // Successfully updated `mLongestDurationNs` with the new value.
      break;
    }
    // Otherwise someone else just updated `mLongestDurationNs`, we need to
    // try again by looping.
  }
}
// RAII object that measures its scoped lifetime duration and reports it to a // `StaticTimersStats`. class MOZ_RAII AutoTimersStats { public: explicit AutoTimersStats(StaticTimersStats& aStats)
: mStats(aStats), mStart(TimeStamp::Now()) {}
// Macro that should be used to collect basic statistics from measurements of
// block durations, from where this macro is, until the end of its enclosing
// scope. The name is used in the static variable name and when displaying
// stats at the end of the program; Another location could use the same name
// but their stats will not be combined, so use different name if these
// locations should be distinguished.
#  define AUTO_TIMERS_STATS(name)                  \
    static ::StaticTimersStats sStat##name(#name); \
    ::AutoTimersStats autoStat##name(sStat##name);
// This macro only counts the number of times it's used, not durations.
// Don't mix with AUTO_TIMERS_STATS!
#  define COUNT_TIMERS_STATS(name)                 \
    static ::StaticTimersStats sStat##name(#name); \
    sStat##name.AddCount();
// TimerEventAllocator is a thread-safe allocator used only for nsTimerEvents. // It's needed to avoid contention over the default allocator lock when // firing timer events (see bug 733277). The thread-safety is required because // nsTimerEvent objects are allocated on the timer thread, and freed on another // thread. Because TimerEventAllocator has its own lock, contention over that // lock is limited to the allocation and deallocation of nsTimerEvent objects. // // Because this is layered over ArenaAllocator, it never shrinks -- even // "freed" nsTimerEvents aren't truly freed, they're just put onto a free-list // for later recycling. So the amount of memory consumed will always be equal // to the high-water mark consumption. But nsTimerEvents are small and it's // unusual to have more than a few hundred of them, so this shouldn't be a // problem in practice.
class TimerEventAllocator { private: struct FreeEntry {
FreeEntry* mNext;
};
// This is a nsICancelableRunnable because we can dispatch it to Workers and // those can be shut down at any time, and in these cases, Cancel() is called // instead of Run(). class nsTimerEvent final : public CancelableRunnable { public:
NS_IMETHOD Run() override;
explicit nsTimerEvent(already_AddRefed<nsTimerImpl> aTimer,
ProfilerThreadId aTimerThreadId)
: mozilla::CancelableRunnable("nsTimerEvent"),
mTimer(aTimer),
mGeneration(mTimer->GetGeneration()),
mTimerThreadId(aTimerThreadId) { // Note: We override operator new for this class, and the override is // fallible!
sAllocatorUsers++;
// Destructor only asserts allocator-lifetime invariants; the actual memory
// is reclaimed via the class's overridden delete into sAllocator (not shown
// in this chunk — confirm against the full file).
~nsTimerEvent() {
// If the allocator is already eligible for deletion, there must still be at
// least one registered user, otherwise it could be torn down twice.
MOZ_ASSERT(!sCanDeleteAllocator || sAllocatorUsers > 0, "This will result in us attempting to deallocate the " "nsTimerEvent allocator twice");
}
struct TimerMarker { static constexpr Span<constchar> MarkerTypeName() { return MakeStringSpan("Timer");
} staticvoid StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter,
uint32_t aDelay, uint8_t aType,
MarkerThreadId aThreadId, bool aCanceled) {
aWriter.IntProperty("delay", aDelay); if (!aThreadId.IsUnspecified()) { // Tech note: If `ToNumber()` returns a uint64_t, the conversion to // int64_t is "implementation-defined" before C++20. This is // acceptable here, because this is a one-way conversion to a unique // identifier that's used to visually separate data by thread on the // front-end.
aWriter.IntProperty( "threadId", static_cast<int64_t>(aThreadId.ThreadId().ToNumber()));
} if (aCanceled) {
aWriter.BoolProperty("canceled", true); // Show a red 'X' as a prefix on the marker chart for canceled timers.
aWriter.StringProperty("prefix", "❌");
}
// The string property for the timer type is not written when the type is // one shot, as that's the type used almost all the time, and that would // consume space in the profiler buffer and then in the profile JSON, // getting in the way of capturing long power profiles. // Bug 1815677 might make this cheap to capture. if (aType != nsITimer::TYPE_ONE_SHOT) { if (aType == nsITimer::TYPE_REPEATING_SLACK) {
aWriter.StringProperty("ttype", "repeating slack");
} elseif (aType == nsITimer::TYPE_REPEATING_PRECISE) {
aWriter.StringProperty("ttype", "repeating precise");
} elseif (aType == nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP) {
aWriter.StringProperty("ttype", "repeating precise can skip");
} elseif (aType == nsITimer::TYPE_REPEATING_SLACK_LOW_PRIORITY) {
aWriter.StringProperty("ttype", "repeating slack low priority");
} elseif (aType == nsITimer::TYPE_ONE_SHOT_LOW_PRIORITY) {
aWriter.StringProperty("ttype", "low priority");
}
}
} static MarkerSchema MarkerTypeDisplay() { using MS = MarkerSchema;
MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
schema.AddKeyLabelFormat("delay", "Delay", MS::Format::Milliseconds);
schema.AddKeyLabelFormat("ttype", "Timer Type", MS::Format::String);
schema.AddKeyLabelFormat("canceled", "Canceled", MS::Format::String);
schema.SetChartLabel("{marker.data.prefix} {marker.data.delay}");
schema.SetTableLabel( "{marker.name} - {marker.data.prefix} {marker.data.delay}"); return schema;
}
};
struct AddRemoveTimerMarker { static constexpr Span<constchar> MarkerTypeName() { return MakeStringSpan("AddRemoveTimer");
} staticvoid StreamJSONMarkerData(baseprofiler::SpliceableJSONWriter& aWriter, const ProfilerString8View& aTimerName,
uint32_t aDelay, MarkerThreadId aThreadId) {
aWriter.StringProperty("name", aTimerName);
aWriter.IntProperty("delay", aDelay); if (!aThreadId.IsUnspecified()) { // Tech note: If `ToNumber()` returns a uint64_t, the conversion to // int64_t is "implementation-defined" before C++20. This is // acceptable here, because this is a one-way conversion to a unique // identifier that's used to visually separate data by thread on the // front-end.
aWriter.IntProperty( "threadId", static_cast<int64_t>(aThreadId.ThreadId().ToNumber()));
}
} static MarkerSchema MarkerTypeDisplay() { using MS = MarkerSchema;
MS schema{MS::Location::MarkerChart, MS::Location::MarkerTable};
schema.AddKeyLabelFormatSearchable("name", "Name", MS::Format::String,
MS::Searchable::Searchable);
schema.AddKeyLabelFormat("delay", "Delay", MS::Format::Milliseconds);
schema.SetTableLabel( "{marker.name} - {marker.data.name} - {marker.data.delay}"); return schema;
}
};
// Creates the shared TimerEventAllocator stored in sAllocator, used by
// nsTimerEvent's overridden allocation operators.
// NOTE(review): presumably called once before any nsTimerEvent is allocated —
// confirm the call site in the full file.
void nsTimerEvent::Init() { sAllocator = new TimerEventAllocator(); }
NS_IMETHODIMP
nsTimerEvent::Run() { if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
TimeStamp now = TimeStamp::Now();
MOZ_LOG(GetTimerLog(), LogLevel::Debug,
("[this=%p] time between PostTimerEvent() and Fire(): %fms\n", this,
(now - mInitTime).ToMilliseconds()));
}
if (profiler_thread_is_being_profiled_for_markers(mTimerThreadId)) {
MutexAutoLock lock(mTimer->mMutex);
nsAutoCString name;
mTimer->GetName(name, lock); // This adds a marker with the timer name as the marker name, to make it // obvious which timers are being used. This marker will be useful to // understand which timers might be added and firing excessively often.
profiler_add_marker(
name, geckoprofiler::category::TIMER,
MarkerOptions(MOZ_LIKELY(mInitTime)
? MarkerTiming::Interval(
mTimer->mTimeout - mTimer->mDelay, mInitTime)
: MarkerTiming::IntervalUntilNowFrom(
mTimer->mTimeout - mTimer->mDelay),
MarkerThreadId(mTimerThreadId)),
TimerMarker{}, mTimer->mDelay.ToMilliseconds(), mTimer->mType,
MarkerThreadId::CurrentThread(), false); // This marker is meant to help understand the behavior of the timer thread.
profiler_add_marker( "PostTimerEvent", geckoprofiler::category::OTHER,
MarkerOptions(MOZ_LIKELY(mInitTime)
? MarkerTiming::IntervalUntilNowFrom(mInitTime)
: MarkerTiming::InstantNow(),
MarkerThreadId(mTimerThreadId)),
AddRemoveTimerMarker{}, name, mTimer->mDelay.ToMilliseconds(),
MarkerThreadId::CurrentThread());
}
// We hold on to mThread to keep the thread alive.
nsresult rv =
NS_NewNamedThread("Timer", getter_AddRefs(mThread), this,
{.stackSize = nsIThreadManager::DEFAULT_STACK_SIZE,
.blockDispatch = true}); if (NS_FAILED(rv)) {
mThread = nullptr;
} else {
RefPtr<TimerObserverRunnable> r = new TimerObserverRunnable(this); if (NS_IsMainThread()) {
r->Run();
} else {
NS_DispatchToMainThread(r);
}
}
// notify the cond var so that Run() can return if (mWaiting) {
mNotified = true;
mMonitor.Notify();
}
// Need to copy content of mTimers array to a local array // because call to timers' Cancel() (and release its self) // must not be done under the lock. Destructor of a callback // might potentially call some code reentering the same lock // that leads to unexpected behavior or deadlock. // See bug 422472.
timers.SetCapacity(mTimers.Length()); for (Entry& entry : mTimers) { if (entry.Value()) {
timers.AppendElement(entry.Take());
}
}
mTimers.Clear();
}
for (const RefPtr<nsTimerImpl>& timer : timers) {
MOZ_ASSERT(timer);
timer->Cancel();
}
mThread->Shutdown(); // wait for the thread to die
// Timer list should be non-empty and first timer should always be // non-canceled at this point and we rely on that here.
MOZ_ASSERT(!mTimers.IsEmpty());
MOZ_ASSERT(mTimers[0].Value());
// Overview: Find the last timer in the list that can be "bundled" together in // the same wake-up with mTimers[0] and use its timeout as our target wake-up // time.
// bundleWakeup is when we should wake up in order to be able to fire all of // the timers in our selected bundle. It will always be the timeout of the // last timer in the bundle.
TimeStamp bundleWakeup = mTimers[0].Timeout();
// cutoffTime is the latest that we can wake up for the timers currently // accepted into the bundle. These needs to be updated as we go through the // list because later timers may have more strict delay tolerances. const TimeDuration minTimerDelay = TimeDuration::FromMilliseconds(
StaticPrefs::timer_minimum_firing_delay_tolerance_ms()); const TimeDuration maxTimerDelay = TimeDuration::FromMilliseconds(
StaticPrefs::timer_maximum_firing_delay_tolerance_ms());
TimeStamp cutoffTime =
bundleWakeup + ComputeAcceptableFiringDelay(mTimers[0].Delay(),
minTimerDelay, maxTimerDelay);
const TimeStamp curTimerDue = curEntry.Timeout(); if (curTimerDue > cutoffTime) { // Can't include this timer in the bundle - it fires too late. break;
}
// This timer can be included in the bundle. Update bundleWakeup and // cutoffTime.
bundleWakeup = curTimerDue;
cutoffTime = std::min(
curTimerDue + ComputeAcceptableFiringDelay(
curEntry.Delay(), minTimerDelay, maxTimerDelay),
cutoffTime);
MOZ_ASSERT(bundleWakeup <= cutoffTime);
}
#if !defined(XP_WIN) // Due to the fact that, on Windows, each TimeStamp object holds two distinct // "values", this assert is not valid there. See bug 1829983 for the details.
MOZ_ASSERT(bundleWakeup - mTimers[0].Timeout() <=
ComputeAcceptableFiringDelay(mTimers[0].Delay(), minTimerDelay,
maxTimerDelay)); #endif
return bundleWakeup;
}
// Returns how late a timer of the given duration may acceptably fire: a
// fixed fraction of its own duration, clamped into [minDelay, maxDelay].
TimeDuration TimerThread::ComputeAcceptableFiringDelay(
    TimeDuration timerDuration, TimeDuration minDelay,
    TimeDuration maxDelay) const {
  // Use the timer's duration divided by this value as a base for how much
  // firing delay a timer can accept. 8 was chosen specifically because it is
  // a power of two which means that this division turns nicely into a shift.
  constexpr int64_t timerDurationDivider = 8;
  static_assert(IsPowerOfTwo(static_cast<uint64_t>(timerDurationDivider)));
  const TimeDuration proportionalDelay = timerDuration / timerDurationDivider;
  // Never allow less slack than minDelay, nor more than maxDelay.
  return std::clamp(proportionalDelay, minDelay, maxDelay);
}
// TODO: Make mAllowedEarlyFiringMicroseconds const and initialize it in the // constructor.
mAllowedEarlyFiringMicroseconds = 250; const TimeDuration allowedEarlyFiring =
TimeDuration::FromMicroseconds(mAllowedEarlyFiringMicroseconds);
bool forceRunNextTimer = false;
// Queue for tracking of how many timers are fired on each wake-up. We need to // buffer these locally and only send off to glean occasionally to avoid // performance hit. static constexpr size_t kMaxQueuedTimerFired = 128;
size_t queuedTimerFiredCount = 0;
AutoTArray<uint64_t, kMaxQueuedTimerFired> queuedTimersFiredPerWakeup;
queuedTimersFiredPerWakeup.SetLengthAndRetainStorage(kMaxQueuedTimerFired);
#ifdef XP_WIN // kTimerPeriodEvalIntervalSec is the minimum amount of time that must pass // before we will consider changing the timer period again. static constexpr float kTimerPeriodEvalIntervalSec = 2.0f; const TimeDuration timerPeriodEvalInterval =
TimeDuration::FromSeconds(kTimerPeriodEvalIntervalSec);
TimeStamp nextTimerPeriodEval = TimeStamp::Now() + timerPeriodEvalInterval;
// If this is false, we will perform all of the logic but will stop short of // actually changing the timer period. constbool adjustTimerPeriod =
StaticPrefs::timer_auto_increase_timer_resolution();
UINT lastTimePeriodSet = ComputeDesiredTimerPeriod();
if (adjustTimerPeriod) {
timeBeginPeriod(lastTimePeriodSet);
} #endif
uint64_t timersFiredThisWakeup = 0; while (!mShutdown) { // Have to use PRIntervalTime here, since PR_WaitCondVar takes it
TimeDuration waitFor; bool forceRunThisTimer = forceRunNextTimer;
forceRunNextTimer = false;
#ifdef DEBUG
VerifyTimerListConsistency(); #endif
if (mSleeping) { // Sleep for 0.1 seconds while not firing timers.
uint32_t milliseconds = 100; if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
milliseconds = ChaosMode::randomUint32LessThan(200);
}
waitFor = TimeDuration::FromMilliseconds(milliseconds);
} else {
waitFor = TimeDuration::Forever();
TimeStamp now = TimeStamp::Now();
#ifdef XP_WIN if (now >= nextTimerPeriodEval) { const UINT newTimePeriod = ComputeDesiredTimerPeriod(); if (newTimePeriod != lastTimePeriodSet) { if (adjustTimerPeriod) {
timeEndPeriod(lastTimePeriodSet);
timeBeginPeriod(newTimePeriod);
}
lastTimePeriodSet = newTimePeriod;
}
nextTimerPeriodEval = now + timerPeriodEvalInterval;
} #endif
if (!mTimers.IsEmpty()) { if (now + allowedEarlyFiring >= mTimers[0].Value()->mTimeout ||
forceRunThisTimer) {
next: // NB: AddRef before the Release under RemoveTimerInternal to avoid // mRefCnt passing through zero, in case all other refs than the one // from mTimers have gone away (the last non-mTimers[i]-ref's Release // must be racing with us, blocked in gThread->RemoveTimer waiting // for TimerThread::mMonitor, under nsTimerImpl::Release.
RefPtr<nsTimerImpl> timerRef(mTimers[0].Take());
RemoveFirstTimerInternal();
MOZ_LOG(GetTimerLog(), LogLevel::Debug,
("Timer thread woke up %fms from when it was supposed to\n",
fabs((now - timerRef->mTimeout).ToMilliseconds())));
// We are going to let the call to PostTimerEvent here handle the // release of the timer so that we don't end up releasing the timer // on the TimerThread instead of on the thread it targets.
{
++timersFiredThisWakeup;
LogTimerEvent::Run run(timerRef.get());
PostTimerEvent(timerRef.forget());
}
if (mShutdown) { break;
}
// Update now, as PostTimerEvent plus the locking may have taken a // tick or two, and we may goto next below.
now = TimeStamp::Now();
}
}
RemoveLeadingCanceledTimersInternal();
if (!mTimers.IsEmpty()) {
TimeStamp timeout = mTimers[0].Value()->mTimeout;
// Don't wait at all (even for PR_INTERVAL_NO_WAIT) if the next timer // is due now or overdue. // // Note that we can only sleep for integer values of a certain // resolution. We use mAllowedEarlyFiringMicroseconds, calculated // before, to do the optimal rounding (i.e., of how to decide what // interval is so small we should not wait at all). double microseconds = (timeout - now).ToMicroseconds();
// The mean value of sFractions must be 1 to ensure that the average of // a long sequence of timeouts converges to the actual sum of their // times. static constexpr double sChaosFractions[] = {0.0, 0.25, 0.5, 0.75,
1.0, 1.75, 2.75}; if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
microseconds *= sChaosFractions[ChaosMode::randomUint32LessThan(
std::size(sChaosFractions))];
forceRunNextTimer = true;
}
if (microseconds < mAllowedEarlyFiringMicroseconds) {
forceRunNextTimer = false; goto next; // round down; execute event now
}
// TECHNICAL NOTE: Determining waitFor (by subtracting |now| from our // desired wake-up time) at this point is not ideal. For one thing, the // |now| that we have at this point is somewhat old. Secondly, there is // quite a bit of code between here and where we actually use waitFor to // request sleep. If I am thinking about this correctly, both of these // will contribute to us requesting more sleep than is actually needed // to wake up at our desired time. We could avoid this problem by only // determining our desired wake-up time here and then calculating the // wait time when we're actually about to sleep. const TimeStamp wakeupTime = ComputeWakeupTimeFromTimers();
waitFor = wakeupTime - now;
// If this were to fail that would mean that we had more timers that we // should have fired.
MOZ_ASSERT(!waitFor.IsZero());
if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) { // If chaos mode is active then mess with the amount of time that we // request to sleep (without changing what we record as our expected // wake-up time). This will simulate unintended early/late wake-ups. constdouble waitInMs = waitFor.ToMilliseconds(); constdouble chaosWaitInMs =
waitInMs * sChaosFractions[ChaosMode::randomUint32LessThan(
std::size(sChaosFractions))];
waitFor = TimeDuration::FromMilliseconds(chaosWaitInMs);
}
if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) { if (waitFor == TimeDuration::Forever())
MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("waiting forever\n")); else
MOZ_LOG(GetTimerLog(), LogLevel::Debug,
("waiting for %f\n", waitFor.ToMilliseconds()));
}
}
{ // About to sleep - let's make note of how many timers we processed and // see if we should send out a new batch of telemetry.
queuedTimersFiredPerWakeup[queuedTimerFiredCount] = timersFiredThisWakeup;
++queuedTimerFiredCount; if (queuedTimerFiredCount == kMaxQueuedTimerFired) {
glean::timer_thread::timers_fired_per_wakeup.AccumulateSamples(
queuedTimersFiredPerWakeup);
queuedTimerFiredCount = 0;
}
}
// About to shut down - let's send out the final batch of timers fired counts. if (queuedTimerFiredCount != 0) {
queuedTimersFiredPerWakeup.SetLengthAndRetainStorage(queuedTimerFiredCount);
glean::timer_thread::timers_fired_per_wakeup.AccumulateSamples(
queuedTimersFiredPerWakeup);
}
#ifdef XP_WIN // About to shut down - let's finish off the last time period that we set. if (adjustTimerPeriod) {
timeEndPeriod(lastTimePeriodSet);
} #endif
if (!aTimer->mEventTarget) { return NS_ERROR_NOT_INITIALIZED;
}
nsresult rv = Init(); if (NS_FAILED(rv)) { return rv;
}
// Awaken the timer thread if: // - This timer needs to fire *before* the Timer Thread is scheduled to wake // up. // AND/OR // - The delay is 0, which is usually meant to be run as soon as possible. // Note: Even if the thread is scheduled to wake up now/soon, on some // systems there could be a significant delay compared to notifying, which // is almost immediate; and some users of 0-delay depend on it being this // fast! const TimeDuration minTimerDelay = TimeDuration::FromMilliseconds(
StaticPrefs::timer_minimum_firing_delay_tolerance_ms()); const TimeDuration maxTimerDelay = TimeDuration::FromMilliseconds(
StaticPrefs::timer_maximum_firing_delay_tolerance_ms()); const TimeDuration firingDelay = ComputeAcceptableFiringDelay(
aTimer->mDelay, minTimerDelay, maxTimerDelay); constbool firingBeforeNextWakeup =
mIntendedWakeupTime.IsNull() ||
(aTimer->mTimeout + firingDelay < mIntendedWakeupTime); constbool wakeUpTimerThread =
mWaiting && (firingBeforeNextWakeup || aTimer->mDelay.IsZero());
// Note: The timer thread is *not* awoken. // The removed-timer entry is just left null, and will be reused (by a new or // re-set timer) or discarded (when the timer thread logic handles non-null // timers around it). // If this was the front timer, and in the unlikely case that its entry is not // soon reused by a re-set timer, the timer thread will wake up at the // previously-scheduled time, but will quickly notice that there is no actual // pending timer, and will restart its wait until the following real timeout.
if (profiler_thread_is_being_profiled_for_markers(mProfilerThreadId)) {
nsAutoCString name;
aTimer->GetName(name, aProofOfLock);
nsLiteralCString prefix("Anonymous_"); // This marker is meant to help understand the behavior of the timer thread.
profiler_add_marker( "RemoveTimer", geckoprofiler::category::OTHER,
MarkerOptions(MarkerThreadId(mProfilerThreadId),
MarkerStack::MaybeCapture(
name.Equals("nonfunction:JS") ||
StringHead(name, prefix.Length()) == prefix)),
AddRemoveTimerMarker{}, name, aTimer->mDelay.ToMilliseconds(),
MarkerThreadId::CurrentThread()); // This adds a marker with the timer name as the marker name, to make it // obvious which timers are being used. This marker will be useful to // understand which timers might be added and removed excessively often.
profiler_add_marker(name, geckoprofiler::category::TIMER,
MarkerOptions(MarkerTiming::IntervalUntilNowFrom(
aTimer->mTimeout - aTimer->mDelay),
MarkerThreadId(mProfilerThreadId)),
TimerMarker{}, aTimer->mDelay.ToMilliseconds(),
aTimer->mType, MarkerThreadId::CurrentThread(), true);
}
for (const Entry& entry : mTimers) { const nsTimerImpl* timer = entry.Value(); if (timer) { if (entry.Timeout() > aDefault) { return aDefault;
}
// Don't yield to timers created with the *_LOW_PRIORITY type. if (!timer->IsLowPriority()) { bool isOnCurrentThread = false;
nsresult rv =
timer->mEventTarget->IsOnCurrentThread(&isOnCurrentThread); if (NS_SUCCEEDED(rv) && isOnCurrentThread) { return entry.Timeout();
}
}
if (aSearchBound == 0) { // Couldn't find any non-low priority timers for the current thread. // Return a compromise between a very short and a long idle time.
TimeStamp fallbackDeadline =
TimeStamp::Now() + TimeDuration::FromMilliseconds(16); return fallbackDeadline < aDefault ? fallbackDeadline : aDefault;
}
--aSearchBound;
}
}
// No timers for this thread, return the default. return aDefault;
}
// This function must be called from within a lock // Also: we hold the mutex for the nsTimerImpl. bool TimerThread::AddTimerInternal(nsTimerImpl& aTimer) {
mMonitor.AssertCurrentThreadOwns();
aTimer.mMutex.AssertCurrentThreadOwns();
AUTO_TIMERS_STATS(TimerThread_AddTimerInternal); if (mShutdown) { returnfalse;
}
if (insertionIndex != 0 && !mTimers[insertionIndex - 1].Value()) { // Very common scenario in practice: The timer just before the insertion // point is canceled, overwrite it.
AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_overwrite_before);
mTimers[insertionIndex - 1] = Entry{aTimer}; returntrue;
}
const size_t length = mTimers.Length(); if (insertionIndex == length) { // We're at the end (including it's the very first insertion), add new timer // at the end.
AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_append); return mTimers.AppendElement(Entry{aTimer}, mozilla::fallible);
}
if (!mTimers[insertionIndex].Value()) { // The timer at the insertion point is canceled, overwrite it.
AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_overwrite);
mTimers[insertionIndex] = Entry{aTimer}; returntrue;
}
// The new timer has to be inserted.
AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_insert); // The capacity should be checked first, because if it needs to be increased // and the memory allocation fails, only the new timer should be lost. if (length == mTimers.Capacity() && mTimers[length - 1].Value()) { // We have reached capacity, and the last entry is not canceled, so we // really want to increase the capacity in case the extra slot is required. // To force-expand the array, append a canceled-timer entry with a timestamp // far in the future. // This empty Entry may be used below to receive the moved-from previous // entry. If not, it may be used in a later call if we need to append a new // timer at the end.
AUTO_TIMERS_STATS(TimerThread_AddTimerInternal_insert_expand); if (!mTimers.AppendElement(
Entry{mTimers[length - 1].Timeout() +
TimeDuration::FromSeconds(365.0 * 24.0 * 60.0 * 60.0)},
mozilla::fallible)) { returnfalse;
}
}
// Extract the timer at the insertion point, and put the new timer in its // place.
Entry extractedEntry = std::exchange(mTimers[insertionIndex], Entry{aTimer}); // Following entries can be pushed until we hit a canceled timer or the end. for (size_t i = insertionIndex + 1; i < length; ++i) {
Entry& entryRef = mTimers[i]; if (!entryRef.Value()) { // Canceled entry, overwrite it with the extracted entry from before.
COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_overwrite);
entryRef = std::move(extractedEntry); returntrue;
} // Write extracted entry from before, and extract current entry.
COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_shifts);
std::swap(entryRef, extractedEntry);
} // We've reached the end of the list, with still one extracted entry to // re-insert. We've checked the capacity above, this cannot fail.
COUNT_TIMERS_STATS(TimerThread_AddTimerInternal_insert_append);
mTimers.AppendElement(std::move(extractedEntry)); returntrue;
}
// This function must be called from within a lock // Also: we hold the mutex for the nsTimerImpl. bool TimerThread::RemoveTimerInternal(nsTimerImpl& aTimer) {
mMonitor.AssertCurrentThreadOwns();
aTimer.mMutex.AssertCurrentThreadOwns();
AUTO_TIMERS_STATS(TimerThread_RemoveTimerInternal); if (!aTimer.IsInTimerThread()) {
COUNT_TIMERS_STATS(TimerThread_RemoveTimerInternal_not_in_list); returnfalse;
}
AUTO_TIMERS_STATS(TimerThread_RemoveTimerInternal_in_list); for (auto& entry : mTimers) { if (entry.Value() == &aTimer) {
entry.Forget(); returntrue;
}
}
MOZ_ASSERT(!aTimer.IsInTimerThread(), "Not found in the list but it should be!?"); returnfalse;
}
if (!timer->mEventTarget) {
NS_ERROR("Attempt to post timer event to NULL event target"); return;
}
// XXX we may want to reuse this nsTimerEvent in the case of repeating timers.
// Since we already addref'd 'timer', we don't need to addref here. // We will release either in ~nsTimerEvent(), or pass the reference back to // the caller. We need to copy the generation number from this timer into the // event, so we can avoid firing a timer that was re-initialized after being // canceled.
void* p = nsTimerEvent::operatornew(sizeof(nsTimerEvent)); if (!p) { return;
}
RefPtr<nsTimerEvent> event =
::new (KnownNotNull, p) nsTimerEvent(timer.forget(), mProfilerThreadId);
nsresult rv;
{ // We release mMonitor around the Dispatch because if the Dispatch interacts // with the timer API we'll deadlock.
MonitorAutoUnlock unlock(mMonitor);
rv = target->Dispatch(event, NS_DISPATCH_NORMAL); if (NS_FAILED(rv)) {
timer = event->ForgetTimer(); // We do this to avoid possible deadlock by taking the two locks in a // different order than is used in RemoveTimer(). RemoveTimer() has // aTimer->mMutex first. We use timer.get() to keep static analysis // happy // NOTE: I'm not sure that any of the below is actually necessary. It // seems to me that the timer that we're trying to fire will have already // been removed prior to this.
MutexAutoLock lock1(timer.get()->mMutex);
MonitorAutoLock lock2(mMonitor);
RemoveTimerInternal(*timer);
}
}
}
// Note: wake may be notified without preceding sleep notification void TimerThread::DoAfterSleep() { // Mainthread
MonitorAutoLock lock(mMonitor);
mSleeping = false;
// Wake up the timer thread to re-process the array to ensure the sleep delay // is correct, and fire any expired timers (perhaps quite a few)
mNotified = true;
PROFILER_MARKER_UNTYPED("AfterSleep", OTHER,
MarkerThreadId(mProfilerThreadId));
mMonitor.Notify();
}
/* This nsReadOnlyTimer class is used for the values returned by the * TimerThread::GetTimers method. * It is not possible to return a strong reference to the nsTimerImpl * instance (that could extend the lifetime of the timer and cause it to fire * a callback pointing to already freed memory) or a weak reference * (nsSupportsWeakReference doesn't support freeing the referee on a thread * that isn't the thread that owns the weak reference), so instead the timer
* name, delay and type are copied to a new object. */ class nsReadOnlyTimer final : public nsITimer { public: explicit nsReadOnlyTimer(const nsACString& aName, uint32_t aDelay,
uint32_t aType)
: mName(aName), mDelay(aDelay), mType(aType) {}
NS_DECL_ISUPPORTS
// NOTE(review): The following trailing text is not code — it appears to be a
// German website disclaimer accidentally appended to this file and should
// likely be removed. English translation of the original:
// "The information on this website has been carefully compiled to the best of
// our knowledge. However, neither the completeness, correctness, nor quality
// of the information provided is guaranteed.
// Note: the colored syntax highlighting is still experimental."