// SPDX-License-Identifier: GPL-2.0
/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
 *
 * What it means for a task to be productive is defined differently
 * for each resource. For IO, productive means a running task. For
 * memory, productive means a running task that isn't a reclaimer. For
 * CPU, productive means an on-CPU task.
 *
 * Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but it does exist at the cgroup level. At the cgroup
 * level, FULL means all non-idle tasks in the cgroup are delayed on
 * the CPU resource which is being used by others outside of the
 * cgroup or throttled by the cgroup's cpu.max configuration.
 *
 * The percentage of wall clock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all times.
 * However, that is clearly not the amount of contention the workload
 * is experiencing: only one out of 256 possible execution threads
 * will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME then becomes the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             =  0.4%
 *	   FULL = (256 - min(256, 256)) / 256 =  0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */

#include <linux/sched/clock.h>
#include <linux/workqueue.h>
#include <linux/psi.h>
#include "sched.h"
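/*
 * Illustrative sketch (not part of the upstream file): the multi-CPU model
 * above, expressed as a tiny helper that turns instantaneous task counts
 * into SOME and FULL percentages. The helper name and the integer-percent
 * output are assumptions made only for demonstration; the real code below
 * works on sampled state times rather than instantaneous counts.
 */
static inline void psi_model_example(unsigned int nr_delayed,
				     unsigned int nr_productive,
				     unsigned int nr_nonidle,
				     unsigned int nr_cpus,
				     unsigned int *some_pct,
				     unsigned int *full_pct)
{
	/* threads = min(nr_nonidle_tasks, nr_cpus) */
	unsigned int threads = min(nr_nonidle, nr_cpus);

	if (!threads) {
		*some_pct = 0;
		*full_pct = 0;
		return;
	}
	/* SOME = min(nr_delayed_tasks / threads, 1) */
	*some_pct = min(nr_delayed * 100 / threads, 100U);
	/* FULL = (threads - min(nr_productive_tasks, threads)) / threads */
	*full_pct = (threads - min(nr_productive, threads)) * 100 / threads;
}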
	/* Snapshot a coherent view of the CPU state */
	do {
seq = psi_read_begin(cpu);
now = cpu_clock(cpu);
memcpy(times, groupc->times, sizeof(groupc->times));
state_mask = groupc->state_mask;
		state_start = groupc->state_start;
		if (cpu == current_cpu)
memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
} while (psi_read_retry(cpu, seq));
	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;
	/*
	 * When collect_percpu_times() is called from avgs_work, we don't
	 * want to re-arm avgs_work when all CPUs are IDLE. But the current
	 * CPU running this avgs_work is never IDLE, because avgs_work can't
	 * be shut off. So for the current CPU, we need to re-arm avgs_work
	 * only when (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0);
	 * for other CPUs we can just check the PSI_NONIDLE delta.
	 */
	if (current_work() == &group->avgs_work.work) {
		bool reschedule;

		if (cpu == current_cpu)
			reschedule = tasks[NR_RUNNING] +
				     tasks[NR_IOWAIT] +
				     tasks[NR_MEMSTALL] > 1;
		else
			reschedule = *pchanged_states & (1 << PSI_NONIDLE);

		if (reschedule)
			*pchanged_states |= PSI_STATE_RESCHEDULE;
	}
}
static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;
	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
}
/* Sample the most recent active period */
pct = div_u64(time * 100, period);
pct *= FIXED_1;
avg[0] = calc_load(avg[0], EXP_10s, pct);
avg[1] = calc_load(avg[1], EXP_60s, pct);
avg[2] = calc_load(avg[2], EXP_300s, pct);
}
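/*
 * Worked example (illustrative, not upstream code): calc_avgs() feeds the
 * latest 2s sample into the same fixed-point EWMA used by the load average
 * code. Assuming FIXED_1 = 2048 and EXP_10s = 1677 (~ 1/exp(2s/10s)), a
 * sustained sample of 50% pressure pulls avg10 as:
 *
 *	avg10' = (avg10 * 1677 + (50 * 2048) * (2048 - 1677)) / 2048
 *
 * i.e. each 2s period closes roughly 18% of the remaining gap between the
 * stored average and the current sample, so avg10 approaches 50% within a
 * few tens of seconds of steady pressure.
 */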
	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wall clock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
for_each_possible_cpu(cpu) {
u32 times[NR_PSI_STATES];
u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, aggregator, times,
				 &cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
}
	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
group->total[aggregator][s] +=
div_u64(deltas[s], max(nonidle_total, 1UL));
if (pchanged_states)
*pchanged_states = changed_states;
}
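/*
 * Worked example (illustrative): with two CPUs sampled over a period,
 * where CPU0 was non-idle for 1000ms with 600ms of SOME time and CPU1 was
 * non-idle for only 250ms with 0ms of SOME time, the non-idle-weighted
 * average used above is:
 *
 *	tSOME = (600 * 1000 + 0 * 250) / (1000 + 250) = 480ms
 *
 * The mostly idle CPU1 dilutes the result only in proportion to the time
 * it actually had something to run, rather than dragging it down by half.
 */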
/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because PSI signal changes only in
 * positive direction and over relatively small window sizes the growth
 * is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
u64 elapsed;
u64 growth;
elapsed = now - win->start_time;
	growth = value - win->start_value;
	/*
	 * After each tracking window passes win->start_value and
	 * win->start_time get reset and win->prev_growth stores
	 * the average per-window growth of the previous window.
	 * win->prev_growth is then used to interpolate additional
	 * growth from the previous window assuming it was linear.
	 */
	if (elapsed > win->size)
		window_reset(win, now, value, growth);
	else {
		u32 remaining;

		remaining = win->size - elapsed;
		growth += div64_u64(win->prev_growth * remaining, win->size);
	}

	/*
	 * On subsequent updates, calculate growth deltas and let
	 * watchers know when their specified thresholds are exceeded.
	 */
list_for_each_entry(t, triggers, node) {
		u64 growth;
		bool new_stall;

		/* Check for stall activity or a previous threshold breach */
		if (!new_stall && !t->pending_event)
			continue;
		/*
		 * Check for new stall activity, as well as deferred
		 * events that occurred in the last window after the
		 * trigger had already fired (we want to ratelimit
		 * events without dropping any).
		 */
		if (new_stall) {
			/* Calculate growth since last update */
			growth = window_update(&t->win, now, total[t->state]);
			if (!t->pending_event) {
				if (growth < t->threshold)
					continue;

				t->pending_event = true;
			}
		}
		/* Limit event signaling to once per window */
		if (now < t->last_event_time + t->win.size)
			continue;
		/* Generate an event */
		if (cmpxchg(&t->event, 0, 1) == 0) {
			if (t->of)
				kernfs_notify(t->of->kn);
			else
				wake_up_interruptible(&t->event_wait);
		}
		t->last_event_time = now;
		/* Reset threshold breach flag once event got generated */
		t->pending_event = false;
}
}
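/*
 * Worked example (illustrative): suppose a trigger window of win->size = 1s
 * saw 100ms of stall growth in the previous window (win->prev_growth), and
 * the current window has only been open for elapsed = 400ms with 30ms of
 * new growth. window_update() then reports
 *
 *	growth = 30ms + 100ms * (1s - 400ms) / 1s = 90ms
 *
 * i.e. the not-yet-observed 600ms of the sliding window is filled in by
 * assuming the previous window's growth was spread out linearly.
 */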
	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
avg_next_update = expires + ((1 + missed_periods) * psi_period);
period = now - (group->avg_last_update + (missed_periods * psi_period));
group->avg_last_update = now;
for (s = 0; s < NR_PSI_STATES - 1; s++) {
u32 sample;
		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
sample = period;
group->avg_total[s] += sample;
calc_avgs(group->avg[s], missed_periods, sample, period);
}
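	/*
	 * Worked example (illustrative): with a 2s period, a lockless reader
	 * may observe e.g. sample = 2.3s of stall because a delta from the
	 * previous period slipped over. The clamp above reports 2s (100%)
	 * now and leaves the remaining 0.3s in group->total to be picked up
	 * as part of the next period's sample, so nothing is lost, only
	 * delayed.
	 */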
dwork = to_delayed_work(work);
group = container_of(dwork, struct psi_group, avgs_work);
mutex_lock(&group->avgs_lock);
now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, &changed_states);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update) {
update_triggers(group, now, PSI_AVGS);
group->avg_next_update = update_averages(group, now);
}
/* Schedule rtpolling if it's not already scheduled or forced. */
static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
				     bool force)
{
	struct task_struct *task;

	/*
	 * atomic_xchg should be called even when !force to provide a
	 * full memory barrier (see the comment inside psi_rtpoll_work).
	 */
	if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
		return;
rcu_read_lock();
	task = rcu_dereference(group->rtpoll_task);
	/*
	 * kworker might be NULL in case psi_trigger_destroy races with
	 * psi_task_change (hotpath) which can't use locks
	 */
	if (likely(task))
		mod_timer(&group->rtpoll_timer, jiffies + delay);
	else
atomic_set(&group->rtpoll_scheduled, 0);
	if (now > group->rtpoll_until) {
		/*
		 * We are either about to start or might stop rtpolling if no
		 * state change was recorded. Resetting rtpoll_scheduled leaves
		 * a small window for psi_group_change to sneak in and schedule
		 * an immediate rtpoll_work before we get to rescheduling. One
		 * potential extra wakeup at the end of the rtpolling window
		 * should be negligible and rtpoll_next_update still keeps
		 * updates correctly on schedule.
		 */
		atomic_set(&group->rtpoll_scheduled, 0);
		/*
		 * A task change can race with the rtpoll worker that is
		 * supposed to report on it. To avoid missing events, ensure
		 * ordering between rtpoll_scheduled and the task state
		 * accesses, such that if the rtpoll worker misses the state
		 * update, the task change is guaranteed to reschedule the
		 * rtpoll worker:
		 *
		 * rtpoll worker:
		 *   atomic_set(rtpoll_scheduled, 0)
		 *   smp_mb()
		 *   LOAD states
		 *
		 * task change:
		 *   STORE states
		 *   if atomic_xchg(rtpoll_scheduled, 1) == 0:
		 *     schedule rtpoll worker
		 *
		 * The atomic_xchg() implies a full barrier.
		 */
		smp_mb();
	} else {
		/* The rtpolling window is not over, keep rescheduling */
force_reschedule = true;
}
	if (changed_states & group->rtpoll_states) {
		/* Initialize trigger windows when entering rtpolling mode */
		if (now > group->rtpoll_until)
init_rtpoll_triggers(group, now);
		/*
		 * Keep the monitor active for at least the duration of the
		 * minimum tracking window as long as monitor states are
		 * changing.
		 */
group->rtpoll_until = now +
group->rtpoll_min_period * UPDATES_PER_WINDOW;
}
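	/*
	 * Worked example (illustrative): assuming UPDATES_PER_WINDOW is 10,
	 * a single 1s trigger window gives rtpoll_min_period = 100ms, so a
	 * state change keeps the rtpoll worker armed for at least
	 * 10 * 100ms = 1s, one full tracking window, before it may stop.
	 */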
	/*
	 * Start with TSK_ONCPU, which doesn't have a corresponding
	 * task count - it's just a boolean flag directly encoded in
	 * the state mask. Clear, set, or carry the current state if
	 * no changes are requested.
	 */
	if (unlikely(clear & TSK_ONCPU)) {
state_mask = 0;
clear &= ~TSK_ONCPU;
	} else if (unlikely(set & TSK_ONCPU)) {
state_mask = PSI_ONCPU;
set &= ~TSK_ONCPU;
} else {
state_mask = groupc->state_mask & PSI_ONCPU;
}
	/*
	 * The rest of the state mask is calculated based on the task
	 * counts. Update those first, then construct the mask.
	 */
	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t]) {
groupc->tasks[t]--;
		} else if (!psi_bug) {
printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
cpu, t, groupc->tasks[0],
groupc->tasks[1], groupc->tasks[2],
groupc->tasks[3], clear, set);
psi_bug = 1;
}
}
	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
groupc->tasks[t]++;
	if (!group->enabled) {
		/*
		 * On the first group change after disabling PSI, conclude
		 * the current state and flush its time. This is unlikely
		 * to matter to the user, but aggregation (get_recent_times)
		 * may have already incorporated the live state into times_prev;
		 * avoid a delta sample underflow when PSI is later re-enabled.
		 */
		if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
record_times(groupc, now);
	/*
	 * Since we care about lost potential, a memstall is FULL
	 * when there are no other working tasks, but also when
	 * the CPU is actively reclaiming and nothing productive
	 * could run even if it were runnable. So when the current
	 * task in a cgroup is in_memstall, the corresponding groupc
	 * on that cpu is in PSI_MEM_FULL state.
	 */
	if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
state_mask |= (1 << PSI_MEM_FULL);
record_times(groupc, now);
groupc->state_mask = state_mask;
if (state_mask & group->rtpoll_states)
psi_schedule_rtpoll_work(group, 1, false);
if (wake_clock && !delayed_work_pending(&group->avgs_work))
schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}
if (next->pid) {
		psi_flags_change(next, 0, TSK_ONCPU);
		/*
		 * Set TSK_ONCPU on @next's cgroups. If @next shares any
		 * ancestors with @prev, those will already have @prev's
		 * TSK_ONCPU bit set, and we can stop the iteration there.
		 */
		for_each_group(group, task_psi_group(next)) {
			struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);

if (groupc->state_mask & PSI_ONCPU) {
				common = group;
				break;
}
psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
}
}
	if (prev->pid) {
		int clear = TSK_ONCPU, set = 0;
		bool wake_clock = true;

		/*
		 * When we're going to sleep, psi_dequeue() lets us
		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
		 * TSK_IOWAIT here, where we can combine it with
		 * TSK_ONCPU and save walking common ancestors twice.
		 */
		if (sleep) {
			clear |= TSK_RUNNING;
			if (prev->in_memstall)
				clear |= TSK_MEMSTALL_RUNNING;
			if (prev->in_iowait)
				set |= TSK_IOWAIT;
			/*
			 * Periodic aggregation shuts off if there is a period of no
			 * task changes, so we wake it back up if necessary. However,
			 * don't do this if the task change is the aggregation worker
			 * itself going to sleep, or we'll ping-pong forever.
			 */
			if (unlikely((prev->flags & PF_WQ_WORKER) &&
wq_worker_last_func(prev) == psi_avgs_work))
wake_clock = false;
}
		/*
		 * TSK_ONCPU is handled up to the common ancestor. If there are
		 * any other differences between the two tasks (e.g. prev goes
		 * to sleep, or only one task is memstall), finish propagating
		 * those differences all the way up to the root.
		 */
		if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
clear &= ~TSK_ONCPU;
for_each_group(group, common)
psi_group_change(group, cpu, clear, set, now, wake_clock);
}
}
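	/*
	 * Worked example (illustrative): if @prev runs in cgroup /A/B and
	 * @next in /A/C, the @next pass above sets TSK_ONCPU on C only and
	 * stops at A, whose state mask still carries @prev's PSI_ONCPU.
	 * The @prev pass clears TSK_ONCPU below that common ancestor, and
	 * only remaining differences (e.g. @prev going to sleep) are walked
	 * from the common ancestor up to the root.
	 */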
psi_write_end(cpu);
}
/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
rq = this_rq_lock_irq(&rf);
/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * in_memstall clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
rq = this_rq_lock_irq(&rf);
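/*
 * Usage sketch (illustrative, not part of this file): callers bracket
 * blocking memory work with the pair above. do_blocking_memory_work() is a
 * hypothetical stand-in for e.g. direct reclaim or a refault wait:
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	do_blocking_memory_work();
 *	psi_memstall_leave(&pflags);
 *
 * The flags word makes nesting safe: an inner section that finds
 * current->in_memstall already set leaves the accounting untouched.
 */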
void psi_cgroup_free(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return;

cancel_delayed_work_sync(&cgroup->psi->avgs_work);
	free_percpu(cgroup->psi->pcpu);
	/* All triggers must be removed by now */
WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
kfree(cgroup->psi);
}
/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags;
	struct rq_flags rf;
	struct rq *rq;

	if (!static_branch_likely(&psi_cgroups_enabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
}
rq = task_rq_lock(task, &rf);
	/*
	 * We may race with schedule() dropping the rq lock between
	 * deactivating prev and switching to next. Because the psi
	 * updates from the deactivation are deferred to the switch
	 * callback to save cgroup tree updates, the task's scheduling
	 * state here is not coherent with its psi state:
	 *
	 * schedule()                   cgroup_move_task()
	 *   rq_lock()
	 *   deactivate_task()
	 *     p->on_rq = 0
	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
	 *   pick_next_task()
	 *     rq_unlock()
	 *                                rq_lock()
	 *                                psi_task_change() // old cgroup
	 *                                task->cgroups = to
	 *                                psi_task_change() // new cgroup
	 *                                rq_unlock()
	 *     rq_lock()
	 *   psi_sched_switch() // does deferred updates in new cgroup
	 *
	 * Don't rely on the scheduling state. Use psi_flags instead.
	 */
task_flags = task->psi_flags;
if (task_flags)
psi_task_change(task, task_flags, 0);
/* See comment above */
rcu_assign_pointer(task->cgroups, to);
if (task_flags)
psi_task_change(task, 0, task_flags);
task_rq_unlock(rq, task, &rf);
}
void psi_cgroup_restart(struct psi_group *group)
{
	int cpu;

	/*
	 * After we disable psi_group->enabled, we don't actually
	 * stop percpu task accounting in each psi_group_cpu;
	 * instead we only stop the test_states() loop, record_times()
	 * and the averaging worker, see psi_group_change() for details.
	 *
	 * When cgroup PSI is disabled, this function has nothing to sync
	 * since the cgroup pressure files are hidden and each percpu
	 * psi_group_cpu sees !psi_group->enabled and only does task
	 * accounting.
	 *
	 * When cgroup PSI is re-enabled, this function uses
	 * psi_group_change() to get the correct state mask from the
	 * test_states() loop on tasks[], and restarts groupc->state_start
	 * from now. Use .clear = .set = 0 here since no task state really
	 * changed.
	 */
	if (!group->enabled)
		return;
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	bool only_full = false;
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	if (!irqtime_enabled() && res == PSI_IRQ)
		return -EOPNOTSUPP;
#endif
/* Update averages before reporting them */
mutex_lock(&group->avgs_lock);
now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, NULL);
	if (now >= group->avg_next_update)
group->avg_next_update = update_averages(group, now);
mutex_unlock(&group->avgs_lock);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	only_full = res == PSI_IRQ;
#endif
	for (full = 0; full < 2 - only_full; full++) {
		unsigned long avg[3] = { 0, };
		u64 total = 0;
		int w;

		/* CPU FULL is undefined at the system level */
		if (!(group == &psi_system && res == PSI_CPU && full)) {
			for (w = 0; w < 3; w++)
avg[w] = group->avg[res * 2 + full][w];
total = div_u64(group->total[PSI_AVGS][res * 2 + full],
NSEC_PER_USEC);
}
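		/*
		 * Illustrative output (values assumed): psi_show() emits one
		 * line per state in the familiar procfs format, e.g. for
		 * /proc/pressure/memory:
		 *
		 *	some avg10=0.12 avg60=0.34 avg300=0.10 total=123456
		 *	full avg10=0.00 avg60=0.05 avg300=0.01 total=23456
		 *
		 * avgN are percentages over the last N seconds; total is the
		 * absolute stall time in microseconds, per the
		 * NSEC_PER_USEC division above.
		 */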
	if (static_branch_likely(&psi_disabled))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * Checking the privilege here on file->f_cred implies that a privileged user
	 * could open the file and delegate the write to an unprivileged one.
	 */
privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);
if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
state = PSI_IO_SOME + res * 2; elseif (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
state = PSI_IO_FULL + res * 2; else return ERR_PTR(-EINVAL);
if (state >= PSI_NONIDLE) return ERR_PTR(-EINVAL);
if (window_us == 0 || window_us > WINDOW_MAX_US) return ERR_PTR(-EINVAL);
/* * Unprivileged users can only use 2s windows so that averages aggregation * work is used, and no RT threads need to be spawned.
*/ if (!privileged && window_us % 2000000) return ERR_PTR(-EINVAL);
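	/*
	 * Usage sketch (illustrative, userspace): the strings parsed above
	 * come from writes to the pressure files. A monitor that wants to be
	 * woken when tasks are memory-stalled for 150ms or more within any
	 * 1s window could do (values and error handling are assumptions):
	 *
	 *	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	 *	const char trig[] = "some 150000 1000000";
	 *	struct pollfd fds = { .fd = fd, .events = POLLPRI };
	 *
	 *	write(fd, trig, strlen(trig) + 1);
	 *	poll(&fds, 1, -1);	// POLLPRI signals a threshold breach
	 *
	 * Threshold and window are both in microseconds, matching
	 * threshold_us and window_us above.
	 */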
	/*
	 * We do not check psi_disabled since it might have been disabled after
	 * the trigger got created.
	 */
	if (!t)
		return;

	group = t->group;
	/*
	 * Wakeup waiters to stop polling and clear the queue to prevent it from
	 * being accessed later. Can happen if cgroup is deleted from under a
	 * polling process.
	 */
	if (t->of)
		kernfs_notify(t->of->kn);
	else
wake_up_interruptible(&t->event_wait);
if (t->aggregator == PSI_AVGS) {
		mutex_lock(&group->avgs_lock);
		if (!list_empty(&t->node)) {
list_del(&t->node);
group->avg_nr_triggers[t->state]--;
}
mutex_unlock(&group->avgs_lock);
} else {
		mutex_lock(&group->rtpoll_trigger_lock);
		if (!list_empty(&t->node)) {
			struct psi_trigger *tmp;
u64 period = ULLONG_MAX;
list_del(&t->node);
			group->rtpoll_nr_triggers[t->state]--;
			if (!group->rtpoll_nr_triggers[t->state])
				group->rtpoll_states &= ~(1 << t->state);
			/*
			 * Reset min update period for the remaining triggers
			 * iff the destroying trigger had the min window size.
			 */
			if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) {
list_for_each_entry(tmp, &group->rtpoll_triggers, node)
period = min(period, div_u64(tmp->win.size,
UPDATES_PER_WINDOW));
group->rtpoll_min_period = period;
			}
			/* Destroy rtpoll_task when the last trigger is destroyed */
			if (group->rtpoll_states == 0) {
group->rtpoll_until = 0;
task_to_destroy = rcu_dereference_protected(
group->rtpoll_task,
lockdep_is_held(&group->rtpoll_trigger_lock));
rcu_assign_pointer(group->rtpoll_task, NULL);
timer_delete(&group->rtpoll_timer);
}
}
mutex_unlock(&group->rtpoll_trigger_lock);
}
	/*
	 * Wait for psi_schedule_rtpoll_work RCU to complete its read-side
	 * critical section before destroying the trigger and optionally the
	 * rtpoll_task.
	 */
	synchronize_rcu();
	/*
	 * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
	 * a deadlock while waiting for psi_rtpoll_work to acquire
	 * rtpoll_trigger_lock.
	 */
	if (task_to_destroy) {
		/*
		 * After the RCU grace period has expired, the worker
		 * can no longer be found through group->rtpoll_task.
		 */
kthread_stop(task_to_destroy);
atomic_set(&group->rtpoll_scheduled, 0);
}
kfree(t);
}