/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @lazy_timer: Timer to unlazify callbacks.
 * @urgent_gp: Number of additional non-lazy grace periods.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @rtp_blkd_tasks: List of tasks blocked as readers.
 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
 * @cpu: CPU number corresponding to this entry.
 * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
struct rcu_segcblist cblist;
raw_spinlock_t __private lock;
unsigned long rtp_jiffies;
unsigned long rtp_n_lock_retries;
struct timer_list lazy_timer;
unsigned int urgent_gp;
struct work_struct rtp_work;
struct irq_work rtp_irq_work;
struct rcu_head barrier_q_head;
struct list_head rtp_blkd_tasks;
struct list_head rtp_exit_list;
int cpu;
int index;
struct rcu_tasks *rtpp;
};
/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot in upper bits.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE).
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @barrier_q_start: Most recent barrier start in jiffies.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
struct rcuwait cbs_wait;
raw_spinlock_t cbs_gbl_lock;
struct mutex tasks_gp_mutex;
int gp_state;
int gp_sleep;
int init_fract;
unsigned long gp_jiffies;
unsigned long gp_start;
unsigned long tasks_gp_seq;
unsigned long n_ipis;
unsigned long n_ipis_fails;
struct task_struct *kthread_ptr;
unsigned long lazy_jiffies;
rcu_tasks_gp_func_t gp_func;
pregp_func_t pregp_func;
pertask_func_t pertask_func;
postscan_func_t postscan_func;
holdouts_func_t holdouts_func;
postgp_func_t postgp_func;
call_rcu_func_t call_func;
unsigned int wait_state;
struct rcu_tasks_percpu __percpu *rtpcpu;
struct rcu_tasks_percpu **rtpcp_array;
int percpu_enqueue_shift;
int percpu_enqueue_lim;
int percpu_dequeue_lim;
unsigned long percpu_dequeue_gpseq;
struct mutex barrier_q_mutex;
atomic_t barrier_q_count;
struct completion barrier_q_completion;
unsigned long barrier_q_seq;
unsigned long barrier_q_start;
char *name;
char *kname;
};
/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
rtp->gp_state = newstate;
rtp->gp_jiffies = jiffies;
}
#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
int i = data_race(rtp->gp_state); // Let KCSAN detect update races
int j = READ_ONCE(i); // Prevent the compiler from reading twice

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU. Do not enqueue callbacks before this function is invoked.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
int cpu;
int lim;
int shift;
int maxcpu;
int index = 0;

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, struct rcu_tasks *rtp)
{
int chosen_cpu;
unsigned long flags;
bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
int ideal_cpu;
unsigned long j;
bool needadjust = false;
bool needwake;
struct rcu_tasks_percpu *rtpcp;
rhp->next = NULL;
rhp->func = func;
local_irq_save(flags);
rcu_read_lock();
ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
WARN_ON_ONCE(chosen_cpu >= rcu_task_cpu_ids);
rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
j = jiffies;
if (rtpcp->rtp_jiffies != j) {
rtpcp->rtp_jiffies = j;
rtpcp->rtp_n_lock_retries = 0;
}
if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
needadjust = true; // Defer adjustment to avoid deadlock.
}
// Queuing callbacks before initialization not yet supported.
if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
rcu_segcblist_init(&rtpcp->cblist);
needwake = (func == wakeme_after_rcu) ||
(rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
if (rtp->lazy_jiffies)
mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
else
needwake = rcu_segcblist_empty(&rtpcp->cblist);
}
if (needwake)
rtpcp->urgent_gp = 3;
rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
if (unlikely(needadjust)) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
}
raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
}
rcu_read_unlock();
/* We can't create the thread unless interrupts are enabled. */
if (needwake && READ_ONCE(rtp->kthread_ptr))
irq_work_queue(&rtpcp->rtp_irq_work);
}
// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
struct rcu_tasks *rtp;
struct rcu_tasks_percpu *rtpcp;
rhp->next = rhp; // Mark the callback as having been invoked.
rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
rtp = rtpcp->rtpp;
if (atomic_dec_and_test(&rtp->barrier_q_count))
complete(&rtp->barrier_q_completion);
}
// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void __maybe_unused rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
int cpu;
unsigned long flags;
struct rcu_tasks_percpu *rtpcp;
unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
int cpu;
int dequeue_limit;
unsigned long flags;
bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
long n;
long ncbs = 0;
long ncbsnz = 0;
int needgpcb = 0;

dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
for (cpu = 0; cpu < dequeue_limit; cpu++) {
if (!cpu_possible(cpu))
continue;
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

/* Advance and accelerate any new callbacks. */
if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
continue;
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
// Should we shrink down to a single callback queue?
n = rcu_segcblist_n_cbs(&rtpcp->cblist);
if (n) {
ncbs += n;
if (cpu > 0)
ncbsnz += n;
}
rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
if (rtp->lazy_jiffies)
rtpcp->urgent_gp--;
needgpcb |= 0x3;
} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
rtpcp->urgent_gp = 0;
}
if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
needgpcb |= 0x1;
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}
// Shrink down to a single callback queue if appropriate.
// This is done in two stages: (1) If there are no more than
// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
// CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
// if there has not been an increase in callbacks, limit dequeuing
// to CPU 0. Note the matching RCU read-side critical section in
// call_rcu_tasks_generic().
if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim > 1) {
WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
smp_store_release(&rtp->percpu_enqueue_lim, 1);
rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
gpdone = false;
pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
}
raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
}
if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
}
if (rtp->percpu_dequeue_lim == 1) {
for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
if (!cpu_possible(cpu))
continue;
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

// Wait for one grace period.
static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
{
int needgpcb;
mutex_lock(&rtp->tasks_gp_mutex);
// If there were none, wait a bit and start over.
if (unlikely(midboot)) {
needgpcb = 0x2;
} else {
mutex_unlock(&rtp->tasks_gp_mutex);
set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
rcuwait_wait_event(&rtp->cbs_wait,
(needgpcb = rcu_tasks_need_gpcb(rtp)),
TASK_IDLE);
mutex_lock(&rtp->tasks_gp_mutex);
}
if (needgpcb & 0x2) { // Wait for one grace period.
set_tasks_gp_state(rtp, RTGS_WAIT_GP);
rtp->gp_start = jiffies;
rcu_seq_start(&rtp->tasks_gp_seq);
rtp->gp_func(rtp);
rcu_seq_end(&rtp->tasks_gp_seq);
}
/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
housekeeping_affine(current, HK_TYPE_RCU);
smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
/*
 * Each pass through the following loop makes one check for
 * newly arrived callbacks, and, if there are some, waits for
 * one RCU-tasks grace period and then invokes the callbacks.
 * This loop is terminated by the system going down. ;-)
 */
for (;;) {
// Wait for one grace period and invoke any callbacks
// that are ready.
rcu_tasks_one_gp(rtp, false);
// Paranoid sleep to keep this from entering a tight loop.
schedule_timeout_idle(rtp->gp_sleep);
}
}
// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
/* Complain if the scheduler has not started. */
if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
"synchronize_%s() called too soon", rtp->name))
return;

// If the grace-period kthread is running, use it.
if (READ_ONCE(rtp->kthread_ptr)) {
wait_rcu_gp_state(rtp->wait_state, rtp->call_func);
return;
}
rcu_tasks_one_gp(rtp, true);
}
t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n",
__func__, rtp->name))
return;
smp_mb(); /* Ensure others see full kthread. */
}
#ifndef CONFIG_TINY_RCU
/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
int rtsimc;
if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
if (rtsimc != rcu_task_stall_info_mult) {
pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
rcu_task_stall_info_mult = rtsimc;
}
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
int cpu;
bool havecbs = false;
bool haveurgent = false;
bool haveurgentcbs = false;
if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
havecbs = true;
if (data_race(rtpcp->urgent_gp))
haveurgent = true;
if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
haveurgentcbs = true;
if (havecbs && haveurgent && haveurgentcbs)
break;
}
pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
rtp->kname,
tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
jiffies - data_race(rtp->gp_jiffies),
data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), ".k"[!!data_race(rtp->kthread_ptr)], ".C"[havecbs], ".u"[haveurgent], ".U"[haveurgentcbs],
rtp->lazy_jiffies,
s);
}
/* Dump out more rcutorture-relevant state common to all RCU-tasks flavors. */
static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *tt, char *tf, char *tst)
{
cpumask_var_t cm;
int cpu;
bool gotcb = false;
unsigned long j = jiffies;

/*
 * There were callbacks, so we need to wait for an RCU-tasks
 * grace period. Start off by scanning the task list for tasks
 * that are not already voluntarily blocked. Mark these tasks
 * and make a list of them in holdouts.
*/
set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
if (rtp->pertask_func) {
rcu_read_lock();
for_each_process_thread(g, t)
rtp->pertask_func(t, &holdouts);
rcu_read_unlock();
}
/*
 * Each pass through the following loop scans the list of holdout
 * tasks, removing any that are no longer holdouts. When the list
 * is empty, we are done.
*/
lastreport = jiffies;
lastinfo = lastreport;
rtsi = READ_ONCE(rcu_task_stall_info);
// Start off with initial wait and slowly back off to 1 HZ wait.
fract = rtp->init_fract;
while (!list_empty(&holdouts)) {
ktime_t exp;
bool firstreport;
bool needreport;
int rtst;
// Slowly back off waiting for holdouts
set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
schedule_timeout_idle(fract);
} else {
exp = jiffies_to_nsecs(fract);
__set_current_state(TASK_IDLE);
schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
}
////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time. There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs. If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete. This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Gather per-CPU lists of tasks in do_exit() to ensure that all
//	tasks that were in the process of exiting (and which thus might
//	not know to synchronize with this RCU Tasks grace period) have
//	completed exiting. The synchronize_rcu() in rcu_tasks_postgp()
//	will take care of any tasks stuck in the non-preemptible region
//	of do_exit() following its call to exit_tasks_rcu_finish().
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list. If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions add and remove, respectively, the
// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
// wait on. This is necessary because rcu_tasks_postscan() must wait on
// tasks that have already been removed from the global list of tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
// is ordered before the grace period via synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.
/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{
/*
 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
 * to complete. Invoking synchronize_rcu() suffices because all
 * these transitions occur with interrupts disabled. Without this
 * synchronize_rcu(), a read-side critical section that started
 * before the grace period might be incorrectly seen as having
 * started after the grace period.
 *
 * This synchronize_rcu() also dispenses with the need for a
 * memory barrier on the first store to t->rcu_tasks_holdout,
 * as it forces the store to happen after the beginning of the
 * grace period.
*/
synchronize_rcu();
}
/* Check for quiescent states since the pregp's synchronize_rcu() */
static bool rcu_tasks_is_holdout(struct task_struct *t)
{
int cpu;

/* Has the task been seen voluntarily sleeping? */
if (!READ_ONCE(t->on_rq))
return false;

/*
 * t->on_rq && !t->se.sched_delayed *could* be considered sleeping but
 * since it is a spurious state (it will transition into the
 * traditional blocked state or get woken up without outside
 * dependencies), not considering it such should only affect timing.
 *
 * Be conservative for now and not include it.
*/
/*
 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
 * quiescent states. But CPU boot code performed by the idle task
 * isn't a quiescent state.
 */
if (is_idle_task(t))
return false;
cpu = task_cpu(t);
/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
return false;

/* Processing between scanning tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
int cpu;
int rtsi = READ_ONCE(rcu_task_stall_info);
if (!IS_ENABLED(CONFIG_TINY_RCU)) {
tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
add_timer(&tasks_rcu_exit_srcu_stall_timer);
}
/*
 * Exiting tasks may escape the tasklist scan. Those are vulnerable
 * until their final schedule() with TASK_DEAD state. To cope with
 * this, divide the fragile exit path part in two intersecting
 * read side critical sections:
 *
 * 1) A task_struct list addition before calling exit_notify(),
 *    which may remove the task from the tasklist, with the
 *    removal after the final preempt_disable() call in do_exit().
 *
 * 2) An _RCU_ read side starting with the final preempt_disable()
 *    call in do_exit() and ending with the final call to schedule()
 *    with TASK_DEAD state.
 *
 * This handles the part 1). And postgp will handle part 2) with a
 * call to synchronize_rcu().
*/
raw_spin_lock_irq_rcu_node(rtpcp);
list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
if (list_empty(&t->rcu_tasks_holdout_list))
rcu_tasks_pertask(t, hop);
// RT kernels need frequent pauses, otherwise
// pause at least once per pair of jiffies.
if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
continue;

// Keep our place in the list while pausing.
// Nothing else traverses this list, so adding a
// bare list_head is OK.
list_add(&tmp, &t->rcu_tasks_exit_list);
raw_spin_unlock_irq_rcu_node(rtpcp);
cond_resched(); // For CONFIG_PREEMPT=n kernels
raw_spin_lock_irq_rcu_node(rtpcp);
t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
list_del(&tmp);
j = jiffies + 1;
}
raw_spin_unlock_irq_rcu_node(rtpcp);
}
if (!IS_ENABLED(CONFIG_TINY_RCU))
timer_delete_sync(&tasks_rcu_exit_srcu_stall_timer);
}
/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t, bool needreport, bool *firstreport)
{
int cpu;

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
/*
 * Because ->on_rq and ->nvcsw are not guaranteed to have full
 * memory barriers prior to them in the schedule() path, memory
 * reordering on other CPUs could cause their RCU-tasks read-side
 * critical sections to extend past the end of the grace period.
 * However, because these ->nvcsw updates are carried out with
 * interrupts disabled, we can use synchronize_rcu() to force the
 * needed ordering on all such CPUs.
 *
 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
 * accesses to be within the grace period, avoiding the need for
 * memory barriers for ->rcu_tasks_holdout accesses.
 *
 * In addition, this synchronize_rcu() waits for exiting tasks
 * to complete their final preempt_disable() region of execution,
 * enforcing the whole region before tasklist removal until
 * the final schedule() with TASK_DEAD state to be an RCU TASKS
 * read side critical section.
*/
synchronize_rcu();
}
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
{
#ifndef CONFIG_TINY_RCU
int rtsi;
rtsi = READ_ONCE(rcu_task_stall_info);
pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
add_timer(&tasks_rcu_exit_srcu_stall_timer);
#endif // #ifndef CONFIG_TINY_RCU
}
/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution. As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
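
/*
 * Illustrative sketch, not part of this file: a typical caller embeds a
 * struct rcu_head in its own structure and lets the callback free the
 * enclosing object once a Tasks-RCU grace period has elapsed.  The
 * "example_obj"/"example_obj_free_rcu" names are hypothetical.
 */
#if 0	/* Example only, not compiled. */
struct example_obj {
	struct rcu_head rh;
	int data;
};

static void example_obj_free_rcu(struct rcu_head *rhp)
{
	struct example_obj *eop = container_of(rhp, struct example_obj, rh);

	kfree(eop);
}

static void example_obj_retire(struct example_obj *eop)
{
	/* Free only after all tasks have passed through a quiescent state. */
	call_rcu_tasks(&eop->rh, example_obj_free_rcu);
}
#endif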
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed. These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks. The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
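
/*
 * Illustrative sketch, not part of this file and using hypothetical names:
 * the classic tracing-style use is to make an old trampoline unreachable,
 * wait for a Tasks-RCU grace period so that no task can still be executing
 * within it, and only then free its memory.
 */
#if 0	/* Example only, not compiled. */
static void *example_trampoline;

static void example_replace_trampoline(void *new_tramp)
{
	void *old_tramp = example_trampoline;

	WRITE_ONCE(example_trampoline, new_tramp);
	/* Every task must pass through a voluntary context switch, usermode,
	 * or idle before old_tramp may safely be freed. */
	synchronize_rcu_tasks();
	vfree(old_tramp);
}
#endif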
/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
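
/*
 * Illustrative sketch, not part of this file and using a hypothetical module:
 * code that queued callbacks with call_rcu_tasks() must wait for them to be
 * invoked before the memory holding those callbacks (or the callback code
 * itself) goes away, typically from the module's exit hook.
 */
#if 0	/* Example only, not compiled. */
static void __exit example_exit(void)
{
	/* Module-specific teardown stops queueing new callbacks here... */

	/* ...then wait for all previously queued callbacks to be invoked. */
	rcu_barrier_tasks();
}
#endif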
/*
 * Protect against tasklist scan blind spot while the task is exiting and
 * may be removed from the tasklist. Do this by adding the task to yet
 * another list.
 *
 * Note that the task will remove itself from this list, so there is no
 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
 * the needed get_task_struct().
 */
void exit_tasks_rcu_start(void)
{
unsigned long flags;
struct rcu_tasks_percpu *rtpcp;
struct task_struct *t = current;

/*
 * Remove the task from the "yet another list" because do_exit() is now
 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
 */
void exit_tasks_rcu_finish(void)
{
unsigned long flags;
struct rcu_tasks_percpu *rtpcp;
struct task_struct *t = current;
////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's
// trick of passing an empty function to schedule_on_each_cpu().
// This approach provides batching of concurrent calls to the synchronous
// synchronize_rcu_tasks_rude() API. This invokes schedule_on_each_cpu()
// in order to send IPIs far and wide and induces otherwise unnecessary
// context switches on all online CPUs, whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}
// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
rtp->n_ipis += cpumask_weight(cpu_online_mask);
schedule_on_each_cpu(rcu_tasks_be_rude);
}
/*
 * call_rcu_tasks_rude() - Queue a callback for a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable). As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 *
 * This is no longer exported, and is instead reserved for use by
 * synchronize_rcu_tasks_rude().
 */
static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed. These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
if (!IS_ENABLED(CONFIG_ARCH_WANTS_NO_INSTR) || IS_ENABLED(CONFIG_FORCE_TASKS_RUDE_RCU))
synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU. This variant is designed to be used
// to protect tracing hooks, including those of BPF. This variant
// therefore:
//
// 1. Has explicit read-side markers to allow finite grace periods
//    in the face of in-kernel loops for PREEMPT=n builds.
//
// 2. Protects code in the idle loop, exception entry/exit, and
//    CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3. Avoids expensive read-side instructions, having overhead similar
//    to that of Preemptible RCU.
//
// There are of course downsides. For example, the grace-period code
// can send IPIs to CPUs, even when those CPUs are in the idle loop or
// in nohz_full userspace. If needed, these downsides can be at least
// partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Disables CPU hotplug, adds all currently executing tasks to the
//	holdout list, then checks the state of all tasks that blocked
//	or were preempted within their current RCU Tasks Trace read-side
//	critical section, adding them to the holdout list if appropriate.
//	Finally, this function re-enables CPU hotplug.
// The ->pertask_func() pointer is NULL, so there is no per-task processing.
// rcu_tasks_trace_postscan():
//	Invokes synchronize_rcu() to wait for late-stage exiting tasks
//	to finish exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list. If there is a quiescent state, the
//	corresponding task is removed from the holdout list. Once this
//	list is empty, the grace period has completed.
// rcu_tasks_trace_postgp():
//	Provides the needed full memory barrier and does debug checks.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
// read-side code is ordered before the grace period by atomic operations
// on .b.need_qs flag of each task involved in this process, or by scheduler
// context-switch ordering (for locked-down non-running readers).
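
/*
 * Illustrative sketch, not part of this file and using hypothetical names:
 * an RCU Tasks Trace reader brackets its hook invocation with the explicit
 * read-side markers, while an updater would wait with
 * synchronize_rcu_tasks_trace() before freeing a previously published hook.
 */
#if 0	/* Example only, not compiled. */
static void (*example_hook)(void);

static void example_run_hook(void)
{
	void (*hook)(void);

	rcu_read_lock_trace();
	hook = READ_ONCE(example_hook);
	if (hook)
		hook();
	rcu_read_unlock_trace();
}
#endif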
// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_TASKS_TRACE_RCU
// Record outstanding IPIs to each CPU. No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;
static unsigned long n_trc_holdouts;

/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
static u8 rcu_ld_need_qs(struct task_struct *t)
{
smp_mb(); // Enforce full grace-period ordering.
return smp_load_acquire(&t->trc_reader_special.b.need_qs);
}
/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
static void rcu_st_need_qs(struct task_struct *t, u8 v)
{
smp_store_release(&t->trc_reader_special.b.need_qs, v);
smp_mb(); // Enforce full grace-period ordering.
}
/*
 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
 * the four-byte operand-size restriction of some platforms.
 *
 * Returns the old value, which is often ignored.
*/
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
{
return cmpxchg(&t->trc_reader_special.b.need_qs, old, new);
}
EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
/*
 * If we are the last reader, signal the grace-period kthread.
 * Also remove from the per-CPU list of blocked tasks.
 */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
unsigned long flags;
struct rcu_tasks_percpu *rtpcp;
union rcu_special trs;
// Open-coded full-word version of rcu_ld_need_qs().
smp_mb(); // Enforce full grace-period ordering.
trs = smp_load_acquire(&t->trc_reader_special);
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
smp_mb(); // Pairs with update-side barriers.
// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
TRC_NEED_QS_CHECKED);
/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
if (list_empty(&t->trc_holdout_list)) {
get_task_struct(t);
list_add(&t->trc_holdout_list, bhp);
n_trc_holdouts++;
}
}
/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
if (!list_empty(&t->trc_holdout_list)) {
list_del_init(&t->trc_holdout_list);
put_task_struct(t);
n_trc_holdouts--;
}
}
// If the task is no longer running on this CPU, leave.
if (unlikely(texp != t))
goto reset_ipi; // Already on holdout list, so will check later.

// If the task is not in a read-side critical section, and
// if this is the last reader, awaken the grace-period kthread.
nesting = READ_ONCE(t->trc_reader_nesting);
if (likely(!nesting)) {
rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
goto reset_ipi;
}
// If we are racing with an rcu_read_unlock_trace(), try again later.
if (unlikely(nesting < 0))
goto reset_ipi;

// Get here if the task is in a read-side critical section.
// Set its state so that it will update state for the grace-period
// kthread upon exit from that critical section.
rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
reset_ipi:
// Allow future IPIs to be sent on CPU and for task.
// Also order this IPI handler against any later manipulations of
// the intended task.
smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}
/* Callback function for scheduler to check locked-down task. */
static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
{
struct list_head *bhp = bhp_in;
int cpu = task_cpu(t);
int nesting;
bool ofl = cpu_is_offline(cpu);

if (task_curr(t) && !ofl) {
// If no chance of heavyweight readers, do it the hard way.
if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
return -EINVAL;

// If heavyweight readers are enabled on the remote task,
// we can inspect its state despite its currently running.
// However, we cannot safely change its state.
n_heavy_reader_attempts++;
// Check for "running" idle tasks on offline CPUs.
if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting))
return -EINVAL; // No quiescent state, do it the hard way.
n_heavy_reader_updates++;
nesting = 0;
} else { // The task is not running, so C-language access is safe.
nesting = t->trc_reader_nesting;
WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
n_heavy_reader_ofl_updates++;
}
// If not exiting a read-side critical section, mark as checked
// so that the grace-period kthread will remove it from the
// holdout list.
if (!nesting) {
rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
return 0; // In QS, so done.
}
if (nesting < 0)
return -EINVAL; // Reader transitioning, try again later.

// The task is in a read-side critical section, so set up its
// state so that it will update state upon exit from that critical
// section.
if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
trc_add_holdout(t, bhp);
return 0;
}
/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t, struct list_head *bhp)
{
int cpu;

// If a previous IPI is still in flight, let it complete.
if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
return;

// The current task had better be in a quiescent state.
if (t == current) {
rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
return;
}
// Attempt to nail down the task for inspection.
get_task_struct(t);
if (!task_call_func(t, trc_inspect_reader, bhp)) {
put_task_struct(t);
return;
}
put_task_struct(t);
// If this task is not yet on the holdout list, then we are in
// an RCU read-side critical section. Otherwise, the invocation of
// trc_add_holdout() that added it to the list did the necessary
// get_task_struct(). Either way, the task cannot be freed out
// from under this code.
// If currently running, send an IPI, either way, add to list.
trc_add_holdout(t, bhp);
if (task_curr(t) &&
time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { // The task is currently running, so try IPIing it.
cpu = task_cpu(t);
// If there is already an IPI outstanding, let it happen.
if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
return;
per_cpu(trc_ipi_to_cpu, cpu) = true;
t->trc_ipi_to_cpu = cpu;
rcu_tasks_trace.n_ipis++;
if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
// Just in case there is some other reason for
// failure than the target CPU being offline.
WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
__func__, cpu);
rcu_tasks_trace.n_ipis_fails++;
per_cpu(trc_ipi_to_cpu, cpu) = false;
t->trc_ipi_to_cpu = -1;
}
}
}
/*
 * Initialize for first-round processing for the specified task.
 * Return false if task is NULL or already taken care of, true otherwise.
 */
static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
{
// During early boot when there is only the one boot CPU, there
// is no idle task for the other CPUs. Also, the grace-period
// kthread is always in a quiescent state. In addition, just return
// if this task is already on the list.
if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
return false;

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
{
if (rcu_tasks_trace_pertask_prep(t, true))
trc_wait_for_one_reader(t, hop);
}
/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(struct list_head *hop)
{
LIST_HEAD(blkd_tasks);
int cpu;
unsigned long flags;
struct rcu_tasks_percpu *rtpcp;
struct task_struct *t;
// There shouldn't be any old IPIs, but...
for_each_possible_cpu(cpu)
WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
// Disable CPU hotplug across the CPU scan for the benefit of
// any IPIs that might be needed. This also waits for all readers
// in CPU-hotplug code paths.
cpus_read_lock();
// These rcu_tasks_trace_pertask_prep() calls are serialized to
// allow safe access to the hop list.
for_each_online_cpu(cpu) {
rcu_read_lock();
// Note that cpu_curr_snapshot() picks up the target
// CPU's current task while its runqueue is locked with
// an smp_mb__after_spinlock(). This ensures that either
// the grace-period kthread will see that task's read-side
// critical section or the task will see the updater's pre-GP
// accesses. The trailing smp_mb() in cpu_curr_snapshot()
// does not currently play a role other than simplify
// that function's ordering semantics. If these simplified
// ordering semantics continue to be redundant, that smp_mb()
// might be removed.
t = cpu_curr_snapshot(cpu);
if (rcu_tasks_trace_pertask_prep(t, true))
trc_add_holdout(t, hop);
rcu_read_unlock();
cond_resched_tasks_rcu_qs();
}
// Only after all running tasks have been accounted for is it
// safe to take care of the tasks that have blocked within their
// current RCU tasks trace read-side critical section.
for_each_possible_cpu(cpu) {
rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
while (!list_empty(&blkd_tasks)) {
rcu_read_lock();
t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
list_del_init(&t->trc_blkd_node);
list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
rcu_tasks_trace_pertask(t, hop);
rcu_read_unlock();
raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
}
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
cond_resched_tasks_rcu_qs();
}