/* The rq is idle, we can sync to clock_task */
static inline void _update_idle_rq_clock_pelt(struct rq *rq)
{
rq->clock_pelt = rq_clock_task(rq);
	u64_u32_store(rq->clock_idle, rq_clock(rq));
	/* Paired with smp_rmb in migrate_se_pelt_lag() */
	smp_wmb();
u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
}
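/*
 * A minimal userspace sketch of the ordering contract above, using C11
 * atomics as a stand-in for the kernel's smp_wmb()/smp_rmb(): the write
 * barrier orders the two stores so that a reader (migrate_se_pelt_lag()
 * in the kernel) that observes the new clock_pelt_idle is guaranteed to
 * also observe the matching clock_idle. Names and values here are
 * illustrative only, not kernel code.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t clock_idle;
static _Atomic uint64_t clock_pelt_idle;

static void writer(uint64_t now, uint64_t now_pelt)
{
	atomic_store_explicit(&clock_idle, now, memory_order_relaxed);
	/* release ordering plays the role of smp_wmb() */
	atomic_store_explicit(&clock_pelt_idle, now_pelt, memory_order_release);
}

static void reader(uint64_t *idle, uint64_t *pelt_idle)
{
	/* acquire ordering plays the role of smp_rmb() */
	*pelt_idle = atomic_load_explicit(&clock_pelt_idle, memory_order_acquire);
	*idle = atomic_load_explicit(&clock_idle, memory_order_relaxed);
}

int main(void)
{
	uint64_t idle, pelt_idle;

	writer(100, 80);
	reader(&idle, &pelt_idle);
	printf("clock_idle=%llu clock_pelt_idle=%llu\n",
	       (unsigned long long)idle, (unsigned long long)pelt_idle);
	return 0;
}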
/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time but then sync back to
 * clock_task when rq is idle.
 *
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		_update_idle_rq_clock_pelt(rq);
		return;
	}
	/*
	 * When a rq runs at a lower compute capacity, it will need
	 * more time to do the same amount of work than it would at
	 * max capacity. In order to be invariant, we scale the delta
	 * to reflect how much work has really been done.
	 * Running longer results in stealing idle time that will
	 * disturb the load signal compared to max capacity. This
	 * stolen idle time will be automatically reflected when the
	 * rq becomes idle and the clock is synced with
	 * rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation.
	 */
delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
rq->clock_pelt += delta;
}
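/*
 * A minimal userspace sketch of the invariance scaling above, assuming
 * SCHED_CAPACITY_SHIFT == 10 and a cap_scale() matching the helper in
 * kernel/sched/fair.c. On a CPU with half compute capacity (512/1024)
 * running at 75% of its max frequency (768/1024), a 4096ns wall-clock
 * delta advances clock_pelt by only 1536ns.
 */
#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define cap_scale(v, s)		((v) * (s) >> SCHED_CAPACITY_SHIFT)

int main(void)
{
	int64_t delta = 4096;			/* ns of rq_clock_task delta */

	delta = cap_scale(delta, 512);		/* cpu invariance: 4096 -> 2048 */
	delta = cap_scale(delta, 768);		/* freq invariance: 2048 -> 1536 */

	printf("clock_pelt advances by %lld ns\n", (long long)delta);
	return 0;
}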
/*
 * When a rq becomes idle, we have to check whether it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and rounding purposes, we don't take into account
 * the position in the current window (period_contrib) and we use the
 * higher bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
u32 util_sum = rq->cfs.avg.util_sum;
util_sum += rq->avg_rt.util_sum;
util_sum += rq->avg_dl.util_sum;
	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would be present at max capacity. As soon as the
	 * utilization of a rq has reached the maximum value, it is
	 * considered an always-running rq without idle time to
	 * steal. This potential idle time is considered lost in
	 * that case. We keep track of this lost idle time relative
	 * to the rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;

	_update_idle_rq_clock_pelt(rq);
}
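/*
 * Worked example (a sketch, not kernel code) of the "fully busy" test
 * above, assuming LOAD_AVG_MAX == 47742, the saturation value of the
 * PELT geometric series: the divider evaluates to 47791490, slightly
 * below the exact (LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT bound,
 * which gives the rounding headroom the comment above refers to.
 */
#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX		47742
#define SCHED_CAPACITY_SHIFT	10

int main(void)
{
	uint32_t divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT)
			   - LOAD_AVG_MAX;

	printf("fully-busy threshold: %u\n", divider);	/* 47791490 */
	return 0;
}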
/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
}
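/*
 * A minimal userspace sketch (hypothetical numbers, not kernel code) of
 * the normalization above: time accumulated in throttled_clock_pelt_time
 * is subtracted from the rq's PELT clock, so a cfs_rq's load signal does
 * not decay across the window in which it was throttled.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rq_clock_pelt = 1000;	/* rq's PELT clock, in ns */
	uint64_t throttled_time = 200;	/* ns this cfs_rq spent throttled */

	/* unthrottled path of cfs_rq_clock_pelt(): prints 800 */
	printf("cfs_rq PELT clock: %llu ns\n",
	       (unsigned long long)(rq_clock_pelt - throttled_time));
	return 0;
}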