/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};
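/*
 * The two helpers below are referenced later (stop_one_cpu(), the stopper
 * thread) but their definitions are not part of this excerpt.  A minimal
 * sketch, assuming the behaviour documented above: the completion fires once
 * nr_todo reaches 0 and non-zero return values are collected in ->ret.
 */
static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion for one finished work */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}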
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
	unsigned long		caller;
	cpu_stop_fn_t		fn;
};
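/*
 * The global state below is used throughout this file but its definitions do
 * not appear in the excerpt.  A minimal sketch: one cpu_stopper instance per
 * possible CPU, plus the serialization state assumed by stop_cpus() and
 * cpu_stop_queue_two_works().
 */
static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);

static DEFINE_MUTEX(stop_cpus_mutex);	/* serializes stop_cpus() callers */
static bool stop_cpus_in_progress;	/* set while queue_stop_cpus_work() queues */
static bool stop_machine_initialized;	/* stopper threads created? */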
void print_stop_info(const char *log_lvl, struct task_struct *task)
{
	/*
	 * If @task is a stopper task, it cannot migrate and task_cpu() is
	 * stable.
	 */
	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

	if (task != stopper->thread)
		return;

	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}
/* queue @work to the stopper for @cpu; returns false if the stopper is disabled */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		list_add_tail(&work->list, &stopper->works);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	if (enabled)
		wake_up_process(stopper->thread);
	preempt_enable();

	return enabled;
}
/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;

	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();

	wait_for_completion(&done.completion);
	return done.ret;
}
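/*
 * Usage sketch: a hypothetical caller (read_cpu_id_fn() and
 * example_query_cpu() are illustrative names, not part of this file) that
 * runs a short non-sleeping callback on one particular CPU and waits for
 * its result.
 */
static int read_cpu_id_fn(void *arg)
{
	/* Runs in stopper context on the target CPU; must not sleep. */
	*(unsigned int *)arg = smp_processor_id();
	return 0;
}

static int example_query_cpu(unsigned int cpu)
{
	unsigned int seen = 0;
	int ret;

	/* -ENOENT if @cpu was offline, otherwise read_cpu_id_fn()'s result. */
	ret = stop_one_cpu(cpu, read_cpu_id_fn, &seen);
	if (!ret)
		pr_info("callback ran on cpu %u\n", seen);
	return ret;
}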
/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
MULTI_STOP_EXIT,
};
struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};
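/*
 * set_state() is called throughout this file but is not shown in the excerpt.
 * A minimal sketch of the counterpart to ack_state() below: re-arm the ack
 * counter for every participating thread, then publish the new state so the
 * spinning CPUs can observe it.
 */
static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}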
/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
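/*
 * stop_machine_yield() is used in the state-machine loop below but is not
 * defined in this excerpt.  A minimal sketch, assuming the generic weak
 * implementation that an architecture may override: give the hardware
 * thread a breather between polls of multi_stop_state.
 */
notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}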
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;
	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
local_save_flags(flags);
	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop.  Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
			/* Also suppress RCU CPU stall warnings. */
			rcu_momentary_eqs();
		}
} while (curstate != MULTI_STOP_EXIT);
	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us.  This will cause us to not wake up the
	 * other stopper forever.
	 */
*/
preempt_disable();
raw_spin_lock_irq(&stopper1->lock);
raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	list_add_tail(&work1->list, &stopper1->works);
	list_add_tail(&work2->list, &stopper2->works);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);
if (unlikely(err == -EDEADLK)) {
preempt_enable();
while (stop_cpus_in_progress)
cpu_relax();
goto retry;
}
if (!err) {
wake_up_process(stopper1->thread);
wake_up_process(stopper2->thread);
}
preempt_enable();
return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done,
		.caller = _RET_IP_,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
	return cpu_stop_queue_work(cpu, work_buf);
}
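/*
 * Usage sketch: a hypothetical fire-and-forget caller (example_nowait_work,
 * example_nowait_fn and example_kick_cpu are illustrative names, not part of
 * this file).  The per-CPU work buffer must stay untouched until the stopper
 * has started executing the callback.
 */
static DEFINE_PER_CPU(struct cpu_stop_work, example_nowait_work);

static int example_nowait_fn(void *arg)
{
	pr_info("stopper callback running on cpu %d\n", smp_processor_id());
	return 0;
}

static bool example_kick_cpu(unsigned int cpu)
{
	/* Returns false if the target CPU's stopper is not enabled. */
	return stop_one_cpu_nowait(cpu, example_nowait_fn, NULL,
				   &per_cpu(example_nowait_work, cpu));
}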
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
preempt_disable();
stop_cpus_in_progress = true;
barrier();
for_each_cpu(cpu, cpumask) {
work = &per_cpu(cpu_stopper.stop_work, cpu);
work->fn = fn;
work->arg = arg;
work->done = done;
		work->caller = _RET_IP_;
		if (cpu_stop_queue_work(cpu, work))
queued = true;
}
barrier();
stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}
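/*
 * __stop_cpus() is called by stop_cpus() below but is not shown in this
 * excerpt.  A minimal sketch, assuming it simply pairs queue_stop_cpus_work()
 * with a completion that counts one ack per CPU in @cpumask.
 */
static int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}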
/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
work = list_first_entry(&stopper->works, struct cpu_stop_work, list);
list_del_init(&work->list);
}
raw_spin_unlock_irq(&stopper->lock);
if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

/* cpu stop callbacks must not sleep, make in_atomic() == T */
stopper->caller = work->caller;
stopper->fn = fn;
preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
done->ret = ret;
cpu_stop_signal_done(done);
}
preempt_count_dec();
stopper->fn = NULL;
stopper->caller = 0;
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
}
}
void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	/*
	 * Lockless.  cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
stopper->enabled = false;
kthread_park(stopper->thread);
}
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

WARN_ON_ONCE(msdata.num_threads != 1);
local_irq_save(flags);
hard_irq_disable();
ret = (*fn)(data);
local_irq_restore(flags);
return ret;
}
/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

/* No CPUs can come up or down during this. */
cpus_read_lock();
ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
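/*
 * Usage sketch: a hypothetical stop_machine() caller (example_cfg,
 * example_swap_cfg_fn and example_install_cfg are illustrative names, not
 * part of this file) that swaps a global structure while every online CPU
 * spins with interrupts disabled, so no CPU can observe it half-updated.
 */
struct example_cfg {
	int a, b;
};

static struct example_cfg example_active_cfg;

static int example_swap_cfg_fn(void *arg)
{
	/* Runs on one CPU while all others spin in multi_cpu_stop(). */
	example_active_cfg = *(struct example_cfg *)arg;
	return 0;
}

static int example_install_cfg(struct example_cfg *new)
{
	/* NULL @cpus: the callback runs on the first online CPU. */
	return stop_machine(example_swap_cfg_fn, new, NULL);
}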
#ifdef CONFIG_SCHED_SMT
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);

	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = cpumask_weight(smt_mask),
		.active_cpus = smt_mask,
	};

	lockdep_assert_cpus_held();

	/* Set the initial state and stop all threads of this core. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(smt_mask, multi_cpu_stop, &msdata);
}
EXPORT_SYMBOL_GPL(stop_core_cpuslocked);
#endif
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

/* Local CPU must be inactive and CPU hotplug in progress. */
BUG_ON(cpu_active(raw_smp_processor_id()));
msdata.num_threads = num_active_cpus() + 1; /* +1 for local */
	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
cpu_relax();
/* Schedule work on other CPUs and execute directly for local CPU */
set_state(&msdata, MULTI_STOP_PREPARE);
cpu_stop_init_done(&done, num_active_cpus());
queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
&done);
ret = multi_cpu_stop(&msdata);
	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
cpu_relax();
	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}