// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking (PELT)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 *  Move PELT related code from fair.c into this pelt.c file
 *  Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

#include "pelt.h"
/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;
	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table which covers y^n (n < PERIOD), to achieve a
	 * constant time decay_load().
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
val >>= local_n / LOAD_AVG_PERIOD;
local_n %= LOAD_AVG_PERIOD;
}
	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
	return val;
}
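/*
 * Illustrative sketch, not part of the kernel: pelt_decay_naive() is a
 * hypothetical helper that applies y^n by n successive 0.32 fixed-point
 * multiplications with y == runnable_avg_yN_inv[1] (y ~= 0.97857, y^32 = 0.5).
 * Up to rounding it returns the same value as decay_load() above, which gets
 * there in constant time with one shift per elapsed LOAD_AVG_PERIOD plus a
 * single table lookup.
 */
static inline u64 pelt_decay_naive(u64 val, u64 n)
{
	while (n--)
		val = mul_u64_u32_shr(val, runnable_avg_yN_inv[1], 32);

	return val;
}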
/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
u64 periods;
delta += sa->period_contrib;
periods = delta / 1024; /* A period is 1024us (~1ms) */
	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
sa->load_sum = decay_load(sa->load_sum, periods);
sa->runnable_sum =
decay_load(sa->runnable_sum, periods);
sa->util_sum = decay_load((u64)(sa->util_sum), periods);
		/*
		 * Step 2
		 */
		delta %= 1024;
		if (load) {
			/*
			 * This relies on the:
			 *
			 *   if (!load)
			 *	runnable = running = 0;
			 *
			 * clause from ___update_load_sum(); this results in
			 * the below usage of @contrib to disappear entirely,
			 * so no point in calculating it.
			 */
contrib = __accumulate_pelt_segments(periods,
1024 - sa->period_contrib, delta);
}
}
sa->period_contrib = delta;
if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_sum += runnable * contrib << SCHED_CAPACITY_SHIFT;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;
return periods;
}
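/*
 * Illustrative sketch, not part of the kernel: __accumulate_pelt_segments(),
 * called above, is not shown in this excerpt. The hypothetical helper below
 * shows the closed form such a helper can rely on. Since
 * 1024 * \Sum_{n=0}^{inf} y^n == LOAD_AVG_MAX, the d2 term over the p-1 full
 * periods is
 *
 *                  p-1
 *   c2 = 1024 * \Sum y^n = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, p) - 1024
 *                  n=1
 *
 * which keeps the accumulation O(1) no matter how many periods have elapsed.
 */
static inline u32 pelt_segments_sketch(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3;			/* d3 y^0 == d3 */

	c1 = decay_load((u64)d1, periods);	/* d1 y^p */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}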
/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series.  To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		   unsigned long load, unsigned long runnable, int running)
{
u64 delta;
	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
}
	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;
sa->last_update_time += delta << 10;
	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has been already dequeued but cfs_rq->curr still points to it.
	 * This means that weight will be 0 but running will not, for a
	 * sched_entity and also for a cfs_rq if the latter becomes idle.
	 * As an example, this happens during sched_balance_newidle() which
	 * calls sched_balance_update_blocked_averages().
	 *
	 * Also see the comment in accumulate_sum().
	 */
	if (!load)
runnable = running = 0;
	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;
return 1;
}
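/*
 * Illustrative sketch, not part of the kernel: the "rolls over" rule from the
 * comment above ___update_load_sum(). When a new 1024us period starts, the
 * entire history is aged by one factor of y and the new period's contribution
 * is added on top; pelt_rollover_sketch() is a hypothetical name for this
 * example.
 */
static inline u64 pelt_rollover_sketch(u64 sum, u32 new_contrib)
{
	return decay_load(sum, 1) + new_contrib;
}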
/*
 * When syncing *_avg with *_sum, we must take into account the current
 * position in the PELT segment otherwise the remaining part of the segment
 * will be considered as idle time whereas it's not yet elapsed and this will
 * generate unwanted oscillation in the range [1002..1024[.
 *
 * The max value of *_sum varies with the current position in the time segment
 * and is equal to:
 *
 *   LOAD_AVG_MAX*y + sa->period_contrib
 *
 * which can be simplified into:
 *
 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
 *
 * because LOAD_AVG_MAX*y == LOAD_AVG_MAX - 1024
 *
 * The same care must be taken when a sched entity is added, updated or
 * removed from a cfs_rq and we need to update sched_avg. Scheduler entities
 * and the cfs rq, to which they are attached, have the same position in the
 * time segment because they use the same clock. This means that we can use
 * the period_contrib of cfs_rq when updating the sched_avg of a sched_entity
 * if it's more convenient.
 */
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load)
{
	u32 divider = get_pelt_divider(sa);

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_avg = div_u64(sa->runnable_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
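/*
 * Illustrative sketch, not part of the kernel: get_pelt_divider() lives in
 * pelt.h; per the comment above it amounts to something like the hypothetical
 * helper below, i.e. *_avg is divided by the maximum value the corresponding
 * *_sum can currently reach rather than by LOAD_AVG_MAX itself.
 */
static inline u32 pelt_divider_sketch(const struct sched_avg *sa)
{
	return LOAD_AVG_MAX - 1024 + sa->period_contrib;
}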
#ifdef CONFIG_SCHED_HW_PRESSURE
/*
 * hardware:
 *
 *   load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked
 *
 *   util_avg and runnable_load_avg are not supported and meaningless.
 *
 *   Unlike rt/dl utilization tracking that track time spent by a cpu
 *   running a rt/dl task through util_avg, the average HW pressure is
 *   tracked through load_avg. This is because HW pressure signal is
 *   time weighted "delta" capacity unlike util_avg which is binary.
 *
 *   "delta capacity" = actual capacity -
 *                      capped capacity of a cpu due to a HW event.
 */
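/*
 * The function that actually feeds HW pressure into PELT is not part of this
 * excerpt. The sketch below (illustrative only, hypothetical name) shows the
 * shape such an update takes: the capacity delta is passed as the load,
 * runnable and running arguments at once, so load_avg ends up tracking the
 * time-weighted pressure described above.
 */
static inline int pelt_hw_pressure_sketch(u64 now, struct rq *rq, u64 capacity)
{
	if (___update_load_sum(now, &rq->avg_hw,
			       capacity,
			       capacity,
			       capacity)) {
		___update_load_avg(&rq->avg_hw, 1);
		return 1;
	}

	return 0;
}
#endif /* CONFIG_SCHED_HW_PRESSURE */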
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * IRQ:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_sum = util_sum
 *
 *   load_avg and runnable_avg are not supported and meaningless.
 */
int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;
	/*
	 * We can't use clock_pelt because IRQ time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
	/*
	 * We know the time that has been used by interrupts since the last
	 * update but we don't know when that was. Let's be pessimistic and
	 * assume the interrupt happened just before the update. This is not
	 * far from reality because an interrupt will most probably wake up a
	 * task and trigger an update of the rq clock, during which the metric
	 * is updated.
	 * We start to decay with normal context time and then we add the
	 * interrupt context time.
	 * We can safely remove running from rq->clock because
	 * rq->clock += delta with delta >= running.
	 */
ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
0,
0,
0);
ret += ___update_load_sum(rq->clock, &rq->avg_irq,
1,
1,
1);
if (ret) {
___update_load_avg(&rq->avg_irq, 1);
trace_pelt_irq_tp(rq);
	}

	return ret;
}
#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
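/*
 * Illustrative worked example, not part of the kernel: the double cap_scale()
 * in update_irq_load_avg() makes IRQ time frequency- and CPU-invariant. With
 * made-up numbers, on a CPU running at half of its maximum frequency (freq
 * scale 512/1024) and having half the capacity of the biggest CPU (cpu scale
 * 512/1024), 1000us of IRQ time accounts as 1000 * 512/1024 * 512/1024 =
 * 250us of full-capacity work. pelt_irq_scale_sketch() is a hypothetical name
 * for this example.
 */
static inline u64 pelt_irq_scale_sketch(u64 running, unsigned long freq_scale,
					unsigned long cpu_scale)
{
	running = cap_scale(running, freq_scale);	/* e.g. 1000 -> 500 */
	running = cap_scale(running, cpu_scale);	/* e.g.  500 -> 250 */

	return running;
}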
/*
 * Load avg and utilization metrics need to be updated periodically and before
 * consumption. This function updates the metrics for all subsystems except for
 * the fair class. @rq must be locked and have its clock updated.
 */
bool update_other_load_avgs(struct rq *rq)
{
	u64 now = rq_clock_pelt(rq);
	const struct sched_class *curr_class = rq->donor->sched_class;
	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));

	lockdep_assert_rq_held(rq);

	/* hw_pressure doesn't care about invariance */
	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
		update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
		update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
		update_irq_load_avg(rq, 0);
}