// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
/* Dump rcu_node combining tree at boot to verify correct setup. */ staticbool dump_tree;
module_param(dump_tree, bool, 0444); /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */ staticbool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT); #ifndef CONFIG_PREEMPT_RT
module_param(use_softirq, bool, 0444); #endif /* Control rcu_node-tree auto-balancing at boot time. */ staticbool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444); /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */ staticint rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444); int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; /* Number of rcu_nodes at specified level. */ int num_rcu_lvl[] = NUM_RCU_LVL_INIT; int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
 * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

// Add delay to rcu_read_unlock() for strict grace periods.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif
/* Retrieve RCU kthreads priority for rcutorture. */
int rcu_get_gp_kthreads_prio(void)
{
	return kthread_prio;
}
EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
 * each delay.  The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay.  This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */
/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(void)
{
	/* A nonzero sequence-state field means a GP is underway. */
	return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
/*
 * Return the number of callbacks queued on the specified CPU.
 * Handles both the nocbs and normal cases.
 */
static long rcu_get_n_cbs_cpu(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

	/* A disabled (uninitialized) cblist holds no callbacks. */
	if (rcu_segcblist_is_enabled(&rdp->cblist))
		return rcu_segcblist_n_cbs(&rdp->cblist);
	return 0;
}
/**
 * rcu_softirq_qs - Provide a set of RCU quiescent states in softirq processing
 *
 * Mark a quiescent state for RCU, Tasks RCU, and Tasks Trace RCU.
 * This is a special-purpose function to be used in the softirq
 * infrastructure and perhaps the occasional long-running softirq
 * handler.
 *
 * Note that from RCU's viewpoint, a call to rcu_softirq_qs() is
 * equivalent to momentarily completely enabling preemption.  For
 * example, given this code::
 *
 *	local_bh_disable();
 *	do_something();
 *	rcu_softirq_qs();  // A
 *	do_something_else();
 *	local_bh_enable();  // B
 *
 * A call to synchronize_rcu() that began concurrently with the
 * call to do_something() would be guaranteed to wait only until
 * execution reached statement A.  Without that rcu_softirq_qs(),
 * that same synchronize_rcu() would instead be guaranteed to wait
 * until execution reached statement B.
 */
void rcu_softirq_qs(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal rcu_softirq_qs() in RCU read-side critical section");
	rcu_qs();
	rcu_preempt_deferred_qs(current);
	rcu_tasks_qs(current, false);
}
/*
 * Reset the current CPU's RCU_WATCHING counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->state variable are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_watching_online(void)
{
	/* Already watching?  Nothing to do. */
	if (ct_rcu_watching() & CT_RCU_WATCHING)
		return;
	ct_state_inc(CT_RCU_WATCHING);
}
/* * Return true if the snapshot returned from ct_rcu_watching() * indicates that RCU is in an extended quiescent state.
*/ staticbool rcu_watching_snap_in_eqs(int snap)
{ return !(snap & CT_RCU_WATCHING);
}
/** * rcu_watching_snap_stopped_since() - Has RCU stopped watching a given CPU * since the specified @snap? * * @rdp: The rcu_data corresponding to the CPU for which to check EQS. * @snap: rcu_watching snapshot taken when the CPU wasn't in an EQS. * * Returns true if the CPU corresponding to @rdp has spent some time in an * extended quiescent state since @snap. Note that this doesn't check if it * /still/ is in an EQS, just that it went through one since @snap. * * This is meant to be used in a loop waiting for a CPU to go through an EQS.
*/ staticbool rcu_watching_snap_stopped_since(struct rcu_data *rdp, int snap)
{ /* * The first failing snapshot is already ordered against the accesses * performed by the remote CPU after it exits idle. * * The second snapshot therefore only needs to order against accesses * performed by the remote CPU prior to entering idle and therefore can * rely solely on acquire semantics.
*/ if (WARN_ON_ONCE(rcu_watching_snap_in_eqs(snap))) returntrue;
/* * Return true if the referenced integer is zero while the specified * CPU remains within a single extended quiescent state.
*/ bool rcu_watching_zero_in_eqs(int cpu, int *vp)
{ int snap;
// If not quiescent, force back to earlier extended quiescent state.
snap = ct_rcu_watching_cpu(cpu) & ~CT_RCU_WATCHING;
smp_rmb(); // Order CT state and *vp reads. if (READ_ONCE(*vp)) returnfalse; // Non-zero, so report failure;
smp_rmb(); // Order *vp read and CT state re-read.
// If still in the same extended quiescent state, we are good! return snap == ct_rcu_watching_cpu(cpu);
}
/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts and must not be idle.
 */
notrace void rcu_momentary_eqs(void)
{
	int seq;

	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
	/* Bump past both EQS states, emulating a momentary idle period. */
	seq = ct_state_inc(2 * CT_RCU_WATCHING);
	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
	rcu_preempt_deferred_qs(current);
}
EXPORT_SYMBOL_GPL(rcu_momentary_eqs);
/** * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle * * If the current CPU is idle and running at a first-level (not nested) * interrupt, or directly, from idle, return true. * * The caller must have at least disabled IRQs.
*/ staticint rcu_is_cpu_rrupt_from_idle(void)
{ long nmi_nesting = ct_nmi_nesting();
/* * Usually called from the tick; but also used from smp_function_call() * for expedited grace periods. This latter can result in running from * the idle task, instead of an actual IPI.
*/
lockdep_assert_irqs_disabled();
/* Non-idle interrupt or nested idle interrupt */ if (nmi_nesting > 1) returnfalse;
/* * Non nested idle interrupt (interrupting section where RCU * wasn't watching).
*/ if (nmi_nesting == 1) returntrue;
/* Not in an interrupt */ if (!nmi_nesting) {
RCU_LOCKDEP_WARN(!in_task() || !is_idle_task(current), "RCU nmi_nesting counter not in idle task!"); return !rcu_is_watching_curr_cpu();
}
/* Force an exit from rcu_do_batch() after 3 milliseconds. */
static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
module_param(rcu_resched_ns, long, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = ULONG_MAX;
module_param(jiffies_till_sched_qs, ulong, 0444);
static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
/*
 * Make sure that we give the grace-period kthread time to detect any
 * idle CPUs before taking active measures to force quiescent states.
 * However, don't go below 100 milliseconds, adjusted upwards for really
 * large systems.
 */
static void adjust_jiffies_till_sched_qs(void)
{
	unsigned long j;

	/* If jiffies_till_sched_qs was specified, respect the request. */
	if (jiffies_till_sched_qs != ULONG_MAX) {
		WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
		return;
	}
	/* Otherwise, set to third fqs scan, but bound below on large system. */
	j = READ_ONCE(jiffies_till_first_fqs) +
		      2 * READ_ONCE(jiffies_till_next_fqs);
	if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
		j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
	WRITE_ONCE(jiffies_to_sched_qs, j);
}
staticint param_set_first_fqs_jiffies(constchar *val, conststruct kernel_param *kp)
{
ulong j; int ret = kstrtoul(val, 0, &j);
/*
 * Return the number of RCU GPs completed thus far for debug & stats.
 */
unsigned long rcu_get_gp_seq(void)
{
	return READ_ONCE(rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats.  Odd numbers mean that a batch is in progress, even
 * numbers mean idle.  The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
/* * Return the root node of the rcu_state structure.
*/ staticstruct rcu_node *rcu_get_root(void)
{ return &rcu_state.node[0];
}
/* * Send along grace-period-related data for rcutorture diagnostics.
*/ void rcutorture_get_gp_data(int *flags, unsignedlong *gp_seq)
{
*flags = READ_ONCE(rcu_state.gp_flags);
*gp_seq = rcu_seq_current(&rcu_state.gp_seq);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
#ifdefined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) /* * An empty function that will trigger a reschedule on * IRQ tail once IRQs get re-enabled on userspace/guest resume.
*/ staticvoid late_wakeup_func(struct irq_work *work)
{
}
/* * If either: * * 1) the task is about to enter in guest mode and $ARCH doesn't support KVM generic work * 2) the task is about to enter in user mode and $ARCH doesn't support generic entry. * * In these cases the late RCU wake ups aren't supported in the resched loops and our * last resort is to fire a local irq_work that will trigger a reschedule once IRQs * get re-enabled again.
*/
noinstr void rcu_irq_work_resched(void)
{ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU)) return;
if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU)) return;
#ifdef CONFIG_NO_HZ_FULL
/**
 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 *
 * The scheduler tick is not normally enabled when CPUs enter the kernel
 * from nohz_full userspace execution.  After all, nohz_full userspace
 * execution is an RCU quiescent state and the time executing in the kernel
 * is quite short.  Except of course when it isn't.  And it is not hard to
 * cause a large system to spend tens of seconds or even minutes looping
 * in the kernel, which can cause a number of problems, include RCU CPU
 * stall warnings.
 *
 * Therefore, if a nohz_full CPU fails to report a quiescent state
 * in a timely manner, the RCU grace-period kthread sets that CPU's
 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 * exception will invoke this function, which will turn on the scheduler
 * tick, which will enable RCU to detect that CPU's quiescent states,
 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 * The tick will be disabled once a quiescent state is reported for
 * this CPU.
 *
 * Of course, in carefully tuned systems, there might never be an
 * interrupt or exception.  In that case, the RCU grace-period kthread
 * will eventually cause one to happen.  However, in less carefully
 * controlled environments, this function allows RCU to get what it
 * needs without creating otherwise useless interruptions.
 */
void __rcu_irq_enter_check_tick(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	// If we're here from NMI there's nothing to do.
	if (in_nmi())
		return;

	RCU_LOCKDEP_WARN(!rcu_is_watching_curr_cpu(),
			 "Illegal rcu_irq_enter_check_tick() from extended quiescent state");

	if (!tick_nohz_full_cpu(rdp->cpu) ||
	    !READ_ONCE(rdp->rcu_urgent_qs) ||
	    READ_ONCE(rdp->rcu_forced_tick)) {
		// RCU doesn't need nohz_full help from this CPU, or it is
		// already getting that help.
		return;
	}

	// We get here only when not in an extended quiescent state and
	// from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
	// already watching and (2) The fact that we are in an interrupt
	// handler and that the rcu_node lock is an irq-disabled lock
	// prevents self-deadlock.  So we can safely recheck under the lock.
	// Note that the nohz_full state currently cannot change.
	raw_spin_lock_rcu_node(rdp->mynode);
	if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
		// A nohz_full CPU is in the kernel and RCU needs a
		// quiescent state.  Turn on the tick!
		WRITE_ONCE(rdp->rcu_forced_tick, true);
		tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	}
	raw_spin_unlock_rcu_node(rdp->mynode);
}
NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
#endif /* CONFIG_NO_HZ_FULL */
/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so.  This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API.  This is used by
 * the idle-entry code to figure out whether it is safe to disable the
 * scheduler-clock interrupt.
 *
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(void)
{
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}
/*
 * If any sort of urgency was applied to the current CPU (for example,
 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
 * to get to a quiescent state, disable it.
 */
static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rdp->mynode);
	WRITE_ONCE(rdp->rcu_urgent_qs, false);
	WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
	/* If the tick was forced on for RCU's benefit, turn it back off. */
	if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
		tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
		WRITE_ONCE(rdp->rcu_forced_tick, false);
	}
}
/**
 * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
 *
 * Return @true if RCU is watching the running CPU and @false otherwise.
 * An @true return means that this CPU can safely enter RCU read-side
 * critical sections.
 *
 * Although calls to rcu_is_watching() from most parts of the kernel
 * will return @true, there are important exceptions.  For example, if the
 * current CPU is deep within its idle loop, in kernel entry/exit code,
 * or offline, rcu_is_watching() will return @false.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
 */
notrace bool rcu_is_watching(void)
{
	bool ret;

	/* Pin to a CPU so the per-CPU check is meaningful. */
	preempt_disable_notrace();
	ret = rcu_is_watching_curr_cpu();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU.  This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU.  Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}
/** * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value. * @lag_gps: Set overflow lag to this many grace period worth of counters * which is used by rcutorture to quickly force a gpwrap situation. * @lag_gps = 0 means we reset it back to the boot-time value.
*/ void rcu_set_gpwrap_lag(unsignedlong lag_gps)
{ unsignedlong lag_seq_count;
/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
 * code whatsoever.
 */
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag,
			 rnp->gp_seq)) {
		WRITE_ONCE(rdp->gpwrap, true);
		WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1);
	}
	/* Keep the irq-work GP sequence number from falling too far behind. */
	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
/*
 * Snapshot the specified CPU's RCU_WATCHING counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int rcu_watching_snap_save(struct rcu_data *rdp)
{
	/*
	 * Full ordering between remote CPU's post idle accesses and updater's
	 * accesses prior to current GP (and also the started GP sequence number)
	 * is enforced by rcu_seq_start() implicit barrier and even further by
	 * smp_mb__after_unlock_lock() barriers chained all the way throughout the
	 * rnp locking tree since rcu_gp_init() and up to the current leaf rnp
	 * locking.
	 *
	 * Ordering between remote CPU's pre idle accesses and post grace period
	 * updater's accesses is enforced by the below acquire semantic.
	 */
	rdp->watching_snap = ct_rcu_watching_cpu_acquire(rdp->cpu);
	if (rcu_watching_snap_in_eqs(rdp->watching_snap)) {
		trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
		rcu_gpnum_ovf(rdp->mynode, rdp);
		return 1;
	}
	return 0;
}
/* * Returns positive if the specified CPU has passed through a quiescent state * by virtue of being in or having passed through an dynticks idle state since * the last call to rcu_watching_snap_save() for this same CPU, or by * virtue of having been offline. * * Returns negative if the specified CPU needs a force resched. * * Returns zero otherwise.
*/ staticint rcu_watching_snap_recheck(struct rcu_data *rdp)
{ unsignedlong jtsq; int ret = 0; struct rcu_node *rnp = rdp->mynode;
/* * If the CPU passed through or entered a dynticks idle phase with * no active irq/NMI handlers, then we can safely pretend that the CPU * already acknowledged the request to pass through a quiescent * state. Either way, that CPU cannot possibly be in an RCU * read-side critical section that started before the beginning * of the current RCU grace period.
*/ if (rcu_watching_snap_stopped_since(rdp, rdp->watching_snap)) {
trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
rcu_gpnum_ovf(rnp, rdp); return 1;
}
/* * Complain if a CPU that is considered to be offline from RCU's * perspective has not yet reported a quiescent state. After all, * the offline CPU should have reported a quiescent state during * the CPU-offline process, or, failing that, by rcu_gp_init() * if it ran concurrently with either the CPU going offline or the * last task on a leaf rcu_node structure exiting its RCU read-side * critical section while all CPUs corresponding to that structure * are offline. This added warning detects bugs in any of these * code paths. * * The rcu_node structure's ->lock is held here, which excludes * the relevant portions the CPU-hotplug code, the grace-period * initialization code, and the rcu_read_unlock() code paths. * * For more detail, please refer to the "Hotplug CPU" section * of RCU's Requirements documentation.
*/ if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) { struct rcu_node *rnp1;
/* * A CPU running for an extended time within the kernel can * delay RCU grace periods: (1) At age jiffies_to_sched_qs, * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set * both .rcu_need_heavy_qs and .rcu_urgent_qs. Note that the * unsynchronized assignments to the per-CPU rcu_need_heavy_qs * variable are safe because the assignments are repeated if this * CPU failed to pass through a quiescent state. This code * also checks .jiffies_resched in case jiffies_to_sched_qs * is set way high.
*/
jtsq = READ_ONCE(jiffies_to_sched_qs); if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
(time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
time_after(jiffies, rcu_state.jiffies_resched) ||
rcu_state.cbovld)) {
WRITE_ONCE(rdp->rcu_need_heavy_qs, true); /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
smp_store_release(&rdp->rcu_urgent_qs, true);
} elseif (time_after(jiffies, rcu_state.gp_start + jtsq)) {
WRITE_ONCE(rdp->rcu_urgent_qs, true);
}
/* * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq! * The above code handles this, but only for straight cond_resched(). * And some in-kernel loops check need_resched() before calling * cond_resched(), which defeats the above code for CPUs that are * running in-kernel with scheduling-clock interrupts disabled. * So hit them over the head with the resched_cpu() hammer!
*/ if (tick_nohz_full_cpu(rdp->cpu) &&
(time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
rcu_state.cbovld)) {
WRITE_ONCE(rdp->rcu_urgent_qs, true);
WRITE_ONCE(rdp->last_fqs_resched, jiffies);
ret = -1;
}
/* * If more than halfway to RCU CPU stall-warning time, invoke * resched_cpu() more frequently to try to loosen things up a bit. * Also check to see if the CPU is getting hammered with interrupts, * but only once per grace period, just to keep the IPIs down to * a dull roar.
*/ if (time_after(jiffies, rcu_state.jiffies_resched)) { if (time_after(jiffies,
READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
WRITE_ONCE(rdp->last_fqs_resched, jiffies);
ret = -1;
} if (IS_ENABLED(CONFIG_IRQ_WORK) &&
!rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
(rnp->ffmask & rdp->grpmask)) {
rdp->rcu_iw_pending = true;
rdp->rcu_iw_gp_seq = rnp->gp_seq;
irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
}
if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) { int cpu = rdp->cpu; struct rcu_snap_record *rsrp; struct kernel_cpustat *kcsp;
/* * rcu_start_this_gp - Request the start of a particular grace period * @rnp_start: The leaf node of the CPU from which to start. * @rdp: The rcu_data corresponding to the CPU from which to start. * @gp_seq_req: The gp_seq of the grace period to start. * * Start the specified grace period, as needed to handle newly arrived * callbacks. The required future grace periods are recorded in each * rcu_node structure's ->gp_seq_needed field. Returns true if there * is reason to awaken the grace-period kthread. * * The caller must hold the specified rcu_node structure's ->lock, which * is why the caller is responsible for waking the grace-period kthread. * * Returns true if the GP thread needs to be awakened else false.
*/ staticbool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, unsignedlong gp_seq_req)
{ bool ret = false; struct rcu_node *rnp;
/* * Use funnel locking to either acquire the root rcu_node * structure's lock or bail out if the need for this grace period * has already been recorded -- or if that grace period has in * fact already started. If there is already a grace period in * progress in a non-leaf node, no recording is needed because the * end of the grace period will scan the leaf rcu_node structures. * Note that rnp_start->lock must not be released.
*/
raw_lockdep_assert_held_rcu_node(rnp_start);
trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); for (rnp = rnp_start; 1; rnp = rnp->parent) { if (rnp != rnp_start)
raw_spin_lock_rcu_node(rnp); if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
(rnp != rnp_start &&
rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
trace_rcu_this_gp(rnp, rdp, gp_seq_req,
TPS("Prestarted")); goto unlock_out;
}
WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req); if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) { /* * We just marked the leaf or internal node, and a * grace period is in progress, which means that * rcu_gp_cleanup() will see the marking. Bail to * reduce contention.
*/
trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
TPS("Startedleaf")); goto unlock_out;
} if (rnp != rnp_start && rnp->parent != NULL)
raw_spin_unlock_rcu_node(rnp); if (!rnp->parent) break; /* At root, and perhaps also leaf. */
}
/* If GP already in progress, just leave, otherwise start one. */ if (rcu_gp_in_progress()) {
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); goto unlock_out;
}
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
WRITE_ONCE(rcu_state.gp_req_activity, jiffies); if (!READ_ONCE(rcu_state.gp_kthread)) {
trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); goto unlock_out;
}
trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
ret = true; /* Caller must wake GP kthread. */
unlock_out: /* Push furthest requested GP to leaf node and rcu_data structure. */ if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
} if (rnp != rnp_start)
raw_spin_unlock_rcu_node(rnp); return ret;
}
/* * Clean up any old requests for the just-ended grace period. Also return * whether any additional grace periods have been requested.
*/ staticbool rcu_future_gp_cleanup(struct rcu_node *rnp)
{ bool needmore; struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
 * sleep upon return, resulting in a grace-period hang), and don't bother
 * awakening when there is nothing for the grace-period kthread to do
 * (as in several CPUs raced to awaken, we lost), and finally don't try
 * to awaken a kthread that has not yet been created.  If all those checks
 * are passed, track some debug information and awaken.
 *
 * So why do the self-wakeup when in an interrupt or softirq handler
 * in the grace-period kthread's context?  Because the kthread might have
 * been interrupted just as it was going to sleep, and just after the final
 * pre-sleep check of the awaken condition.  In this case, a wakeup really
 * is required, and is therefore supplied.
 */
static void rcu_gp_kthread_wake(void)
{
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	if ((current == t && !in_hardirq() && !in_serving_softirq()) ||
	    !READ_ONCE(rcu_state.gp_flags) || !t)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
}
/* * If there is room, assign a ->gp_seq number to any callbacks on this * CPU that have not already been assigned. Also accelerate any callbacks * that were previously assigned a ->gp_seq number that has since proven * to be too conservative, which can happen if callbacks get assigned a * ->gp_seq number while RCU is idle, but with reference to a non-root * rcu_node structure. This function is idempotent, so it does not hurt * to call it repeatedly. Returns an flag saying that we should awaken * the RCU grace-period kthread. * * The caller must hold rnp->lock with interrupts disabled.
*/ staticbool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{ unsignedlong gp_seq_req; bool ret = false;
/* * Callbacks are often registered with incomplete grace-period * information. Something about the fact that getting exact * information requires acquiring a global lock... RCU therefore * makes a conservative estimate of the grace period number at which * a given callback will become ready to invoke. The following * code checks this estimate and improves it when possible, thus * accelerating callback invocation to an earlier grace-period * number.
*/
gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq); if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
/* Trace depending on how much we were able to accelerate. */ if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB")); else
trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
/*
 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
 * rcu_node structure's ->lock be held.  It consults the cached value
 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
 * while holding the leaf rcu_node structure's ->lock.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
					struct rcu_data *rdp)
{
	unsigned long c;
	bool needwake;

	rcu_lockdep_assert_cblist_protected(rdp);
	c = rcu_seq_snap(&rcu_state.gp_seq);
	if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
		/* Old request still live, so mark recent callbacks. */
		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
		return;
	}
	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
	needwake = rcu_accelerate_cbs(rnp, rdp);
	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	if (needwake)
		rcu_gp_kthread_wake();
}
/* * Move any callbacks whose grace period has completed to the * RCU_DONE_TAIL sublist, then compact the remaining sublists and * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL * sublist. This function is idempotent, so it does not hurt to * invoke it repeatedly. As long as it is not invoked -too- often... * Returns true if the RCU grace-period kthread needs to be awakened. * * The caller must hold rnp->lock with interrupts disabled.
*/ staticbool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
{
rcu_lockdep_assert_cblist_protected(rdp);
raw_lockdep_assert_held_rcu_node(rnp);
/* If no pending (not yet ready to invoke) callbacks, nothing to do. */ if (!rcu_segcblist_pend_cbs(&rdp->cblist)) returnfalse;
/* * Find all callbacks whose ->gp_seq numbers indicate that they * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
*/
rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
/* Classify any remaining callbacks. */ return rcu_accelerate_cbs(rnp, rdp);
}
/*
 * Move and classify callbacks, but only if doing so won't require
 * that the RCU grace-period kthread be awakened.
 */
static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
						  struct rcu_data *rdp)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
	    !raw_spin_trylock_rcu_node(rnp))
		return;
	// The grace period cannot end while we hold the rcu_node lock.
	if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))
		WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
	raw_spin_unlock_rcu_node(rnp);
}
/*
 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
 * quiescent state.  This is intended to be invoked when the CPU notices
 * a new grace period.
 */
static void rcu_strict_gp_check_qs(void)
{
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
		/* An empty read-side critical section provides a QS. */
		rcu_read_lock();
		rcu_read_unlock();
	}
}
/* * Update CPU-local rcu_data state to record the beginnings and ends of * grace periods. The caller must hold the ->lock of the leaf rcu_node * structure corresponding to the current CPU, and must have irqs disabled. * Returns true if the grace-period kthread needs to be awakened.
*/ staticbool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
{ bool ret = false; bool need_qs; constbool offloaded = rcu_rdp_is_offloaded(rdp);
raw_lockdep_assert_held_rcu_node(rnp);
if (rdp->gp_seq == rnp->gp_seq) returnfalse; /* Nothing to do. */
/* Handle the ends of any preceding grace periods first. */ if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
unlikely(rdp->gpwrap)) { if (!offloaded)
ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
rdp->core_needs_qs = false;
trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
} else { if (!offloaded)
ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ if (rdp->core_needs_qs)
rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
}
/* Now handle the beginnings of any new-to-this-CPU grace periods. */ if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
unlikely(rdp->gpwrap)) { /* * If the current grace period is waiting for this CPU, * set up to detect a quiescent state, otherwise don't * go looking for one.
*/
trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
need_qs = !!(rnp->qsmask & rdp->grpmask);
rdp->cpu_no_qs.b.norm = need_qs;
rdp->core_needs_qs = need_qs;
zero_cpu_stall_ticks(rdp);
}
rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed); if (IS_ENABLED(CONFIG_PROVE_RCU) && rdp->gpwrap)
WRITE_ONCE(rdp->last_sched_clock, jiffies);
WRITE_ONCE(rdp->gpwrap, false);
rcu_gpnum_ovf(rnp, rdp); return ret;
}
/*
 * Handler for on_each_cpu() to invoke the target CPU's RCU core
 * processing.
 */
static void rcu_strict_gp_boundary(void *unused)
{
	invoke_rcu_core();
}
// Make the polled API aware of the beginning of a grace period. staticvoid rcu_poll_gp_seq_start(unsignedlong *snap)
{ struct rcu_node *rnp = rcu_get_root();
if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
raw_lockdep_assert_held_rcu_node(rnp);
// If RCU was idle, note beginning of GP. if (!rcu_seq_state(rcu_state.gp_seq_polled))
rcu_seq_start(&rcu_state.gp_seq_polled);
// Either way, record current state.
*snap = rcu_state.gp_seq_polled;
}
// Make the polled API aware of the end of a grace period. staticvoid rcu_poll_gp_seq_end(unsignedlong *snap)
{ struct rcu_node *rnp = rcu_get_root();
if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
raw_lockdep_assert_held_rcu_node(rnp);
// If the previously noted GP is still in effect, record the // end of that GP. Either way, zero counter to avoid counter-wrap // problems. if (*snap && *snap == rcu_state.gp_seq_polled) {
rcu_seq_end(&rcu_state.gp_seq_polled);
rcu_state.gp_seq_polled_snap = 0;
rcu_state.gp_seq_polled_exp_snap = 0;
} else {
*snap = 0;
}
}
// Make the polled API aware of the beginning of a grace period, but // where caller does not hold the root rcu_node structure's lock. staticvoid rcu_poll_gp_seq_start_unlocked(unsignedlong *snap)
{ unsignedlong flags; struct rcu_node *rnp = rcu_get_root();
if (rcu_init_invoked()) { if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
lockdep_assert_irqs_enabled();
raw_spin_lock_irqsave_rcu_node(rnp, flags);
}
rcu_poll_gp_seq_start(snap); if (rcu_init_invoked())
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
// Make the polled API aware of the end of a grace period, but where // caller does not hold the root rcu_node structure's lock. staticvoid rcu_poll_gp_seq_end_unlocked(unsignedlong *snap)
{ unsignedlong flags; struct rcu_node *rnp = rcu_get_root();
if (rcu_init_invoked()) { if (rcu_scheduler_active != RCU_SCHEDULER_INACTIVE)
lockdep_assert_irqs_enabled();
raw_spin_lock_irqsave_rcu_node(rnp, flags);
}
rcu_poll_gp_seq_end(snap); if (rcu_init_invoked())
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
/* * There is a single llist, which is used for handling * synchronize_rcu() users' enqueued rcu_synchronize nodes. * Within this llist, there are two tail pointers: * * wait tail: Tracks the set of nodes, which need to * wait for the current GP to complete. * done tail: Tracks the set of nodes, for which grace * period has elapsed. These nodes processing * will be done as part of the cleanup work * execution by a kworker. * * At every grace period init, a new wait node is added * to the llist. This wait node is used as wait tail * for this new grace period. Given that there are a fixed * number of wait nodes, if all wait nodes are in use * (which can happen when kworker callback processing * is delayed) and additional grace period is requested. * This means, a system is slow in processing callbacks. * * TODO: If a slow processing is detected, a first node * in the llist should be used as a wait-tail for this * grace period, therefore users which should wait due * to a slow process are handled by _this_ grace period * and not next. * * Below is an illustration of how the done and wait * tail pointers move from one set of rcu_synchronize nodes * to the other, as grace periods start and finish and * nodes are processed by kworker. * * * a. Initial llist callbacks list: * * +----------+ +--------+ +-------+ * | | | | | | * | head |---------> | cb2 |--------->| cb1 | * | | | | | | * +----------+ +--------+ +-------+ * * * * b. New GP1 Start: * * WAIT TAIL * | * | * v * +----------+ +--------+ +--------+ +-------+ * | | | | | | | | * | head ------> wait |------> cb2 |------> | cb1 | * | | | head1 | | | | | * +----------+ +--------+ +--------+ +-------+ * * * * c. GP completion: * * WAIT_TAIL == DONE_TAIL * * DONE TAIL * | * | * v * +----------+ +--------+ +--------+ +-------+ * | | | | | | | | * | head ------> wait |------> cb2 |------> | cb1 | * | | | head1 | | | | | * +----------+ +--------+ +--------+ +-------+ * * * * d. 
New callbacks and GP2 start: * * WAIT TAIL DONE TAIL * | | * | | * v v * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ * | | | | | | | | | | | | | | * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 | * | | | head2| | | | | |head1| | | | | * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ * * * * e. GP2 completion: * * WAIT_TAIL == DONE_TAIL * DONE TAIL * | * | * v * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ * | | | | | | | | | | | | | | * | head ------> wait |--->| cb4 |--->| cb3 |--->|wait |--->| cb2 |--->| cb1 | * | | | head2| | | | | |head1| | | | | * +----------+ +------+ +------+ +------+ +-----+ +-----+ +-----+ * * * While the llist state transitions from d to e, a kworker * can start executing rcu_sr_normal_gp_cleanup_work() and * can observe either the old done tail (@c) or the new * done tail (@e). So, done tail updates and reads need * to use the rel-acq semantics. If the concurrent kworker * observes the old done tail, the newly queued work * execution will process the updated done tail. If the * concurrent kworker observes the new done tail, then * the newly queued work will skip processing the done * tail, as workqueue semantics guarantees that the new * work is executed only after the previous one completes. * * f. kworker callbacks processing complete: * * * DONE TAIL * | * | * v * +----------+ +--------+ * | | | | * | head ------> wait | * | | | head2 | * +----------+ +--------+ *
*/ staticbool rcu_sr_is_wait_head(struct llist_node *node)
{ return &(rcu_state.srs_wait_nodes)[0].node <= node &&
node <= &(rcu_state.srs_wait_nodes)[SR_NORMAL_GP_WAIT_HEAD_MAX - 1].node;
}
staticstruct llist_node *rcu_sr_get_wait_head(void)
{ struct sr_wait_node *sr_wn; int i;
for (i = 0; i < SR_NORMAL_GP_WAIT_HEAD_MAX; i++) {
sr_wn = &(rcu_state.srs_wait_nodes)[i];
if (!atomic_cmpxchg_acquire(&sr_wn->inuse, 0, 1)) return &sr_wn->node;
}
/* * This work execution can potentially execute * while a new done tail is being updated by * grace period kthread in rcu_sr_normal_gp_cleanup(). * So, read and updates of done tail need to * follow acq-rel semantics. * * Given that wq semantics guarantees that a single work * cannot execute concurrently by multiple kworkers, * the done tail list manipulations are protected here.
*/
done = smp_load_acquire(&rcu_state.srs_done_tail); if (WARN_ON_ONCE(!done)) return;
WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
head = done->next;
done->next = NULL;
/* * The dummy node, which is pointed to by the * done tail which is acq-read above is not removed * here. This allows lockless additions of new * rcu_synchronize nodes in rcu_sr_normal_add_req(), * while the cleanup work executes. The dummy * nodes is removed, in next round of cleanup * work execution.
*/
llist_for_each_safe(rcu, next, head) { if (!rcu_sr_is_wait_head(rcu)) {
rcu_sr_normal_complete(rcu); continue;
}
rcu_sr_put_wait_head(rcu);
}
/* Order list manipulations with atomic access. */
atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
}
/*
 * Helper function for rcu_gp_cleanup().
 *
 * Completes up to SR_MAX_USERS_WAKE_FROM_GP synchronize_rcu() users
 * directly from GP-kthread context, then publishes the remainder (plus
 * their wait head) as the new done tail for the cleanup kworker.
 */
static void rcu_sr_normal_gp_cleanup(void)
{
	struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
	int done = 0;

	wait_tail = rcu_state.srs_wait_tail;
	if (wait_tail == NULL)
		return;

	/*
	 * NOTE(review): these two lines appear to have been lost in
	 * extraction; restored to match upstream and the
	 * WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL) check in
	 * rcu_sr_normal_gp_init(), whose comment states that gp_cleanup()
	 * "rolls over" the wait list -- verify against upstream tree.c.
	 */
	rcu_state.srs_wait_tail = NULL;
	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);

	/*
	 * Process (a) and (d) cases. See an illustration.
	 */
	llist_for_each_safe(rcu, next, wait_tail->next) {
		if (rcu_sr_is_wait_head(rcu))
			break;

		rcu_sr_normal_complete(rcu);
		// It can be last, update a next on this step.
		wait_tail->next = next;

		if (++done == SR_MAX_USERS_WAKE_FROM_GP)
			break;
	}

	/*
	 * Fast path, no more users to process except putting the second last
	 * wait head if no inflight-workers. If there are in-flight workers,
	 * they will remove the last wait head.
	 *
	 * Note that the ACQUIRE orders atomic access with list manipulation.
	 */
	if (wait_tail->next && wait_tail->next->next == NULL &&
	    rcu_sr_is_wait_head(wait_tail->next) &&
	    !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
		rcu_sr_put_wait_head(wait_tail->next);
		wait_tail->next = NULL;
	}

	/* Concurrent sr_normal_gp_cleanup work might observe this update. */
	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
	smp_store_release(&rcu_state.srs_done_tail, wait_tail);

	/*
	 * We schedule a work in order to perform a final processing
	 * of outstanding users(if still left) and releasing wait-heads
	 * added by rcu_sr_normal_gp_init() call.
	 */
	if (wait_tail->next) {
		atomic_inc(&rcu_state.srs_cleanups_pending);
		if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
			atomic_dec(&rcu_state.srs_cleanups_pending);
	}
}
/*
 * Helper function for rcu_gp_init().
 *
 * Starts a new wait segment for synchronize_rcu() users by injecting a
 * wait-head dummy node at the front of the srs_next llist.  Returns true
 * only when no wait head could be claimed, in which case the caller must
 * kick another grace period to retry.
 */
static bool rcu_sr_normal_gp_init(void)
{
	struct llist_node *first;
	struct llist_node *wait_head;
	bool start_new_poll = false;

	first = READ_ONCE(rcu_state.srs_next.first);
	if (!first || rcu_sr_is_wait_head(first))
		return start_new_poll;

	wait_head = rcu_sr_get_wait_head();
	if (!wait_head) {
		// Kick another GP to retry.
		start_new_poll = true;
		return start_new_poll;
	}

	/* Inject a wait-dummy-node. */
	llist_add(wait_head, &rcu_state.srs_next);

	/*
	 * A waiting list of rcu_synchronize nodes should be empty on
	 * this step, since a GP-kthread, rcu_gp_init() -> gp_cleanup(),
	 * rolls it over. If not, it is a BUG, warn a user.
	 */
	WARN_ON_ONCE(rcu_state.srs_wait_tail != NULL);
	rcu_state.srs_wait_tail = wait_head;
	ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_wait_tail);

	/*
	 * NOTE(review): this trailing return and closing brace were lost in
	 * extraction; restored to satisfy the bool return type -- verify
	 * against upstream tree.c.
	 */
	return start_new_poll;
}
/* * Initialize a new grace period. Return false if no grace period required.
*/ static noinline_for_stack bool rcu_gp_init(void)
{ unsignedlong flags; unsignedlong oldmask; unsignedlong mask; struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(); bool start_new_poll; unsignedlong old_gp_seq;
WRITE_ONCE(rcu_state.gp_activity, jiffies);
raw_spin_lock_irq_rcu_node(rnp); if (!rcu_state.gp_flags) { /* Spurious wakeup, tell caller to go back to sleep. */
raw_spin_unlock_irq_rcu_node(rnp); returnfalse;
}
WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
if (WARN_ON_ONCE(rcu_gp_in_progress())) { /* * Grace period already in progress, don't start another. * Not supposed to be able to happen.
*/
raw_spin_unlock_irq_rcu_node(rnp); returnfalse;
}
/* Advance to a new grace period and initialize state. */
record_gp_stall_check_time(); /* * A new wait segment must be started before gp_seq advanced, so * that previous gp waiters won't observe the new gp_seq.
*/
start_new_poll = rcu_sr_normal_gp_init(); /* Record GP times before starting GP, hence rcu_seq_start(). */
old_gp_seq = rcu_state.gp_seq; /* * Critical ordering: rcu_seq_start() must happen BEFORE the CPU hotplug * scan below. Otherwise we risk a race where a newly onlining CPU could * be missed by the current grace period, potentially leading to * use-after-free errors. For a detailed explanation of this race, see * Documentation/RCU/Design/Requirements/Requirements.rst in the * "Hotplug CPU" section. * * Also note that the root rnp's gp_seq is kept separate from, and lags, * the rcu_state's gp_seq, for a reason. See the Quick-Quiz on * Single-node systems for more details (in Data-Structures.rst).
*/
rcu_seq_start(&rcu_state.gp_seq); /* Ensure that rcu_seq_done_exact() guardband doesn't give false positives. */
WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
rcu_seq_done_exact(&old_gp_seq, rcu_seq_snap(&rcu_state.gp_seq)));
/* * The "start_new_poll" is set to true, only when this GP is not able * to handle anything and there are outstanding users. It happens when * the rcu_sr_normal_gp_init() function was not able to insert a dummy * separator to the llist, because there were no left any dummy-nodes. * * Number of dummy-nodes is fixed, it could be that we are run out of * them, if so we start a new pool request to repeat a try. It is rare * and it means that a system is doing a slow processing of callbacks.
*/ if (start_new_poll)
(void) start_poll_synchronize_rcu();
/* * Apply per-leaf buffered online and offline operations to * the rcu_node tree. Note that this new grace period need not * wait for subsequent online CPUs, and that RCU hooks in the CPU * offlining path, when combined with checks in this function, * will handle CPUs that are currently going offline or that will * go offline later. Please also refer to "Hotplug CPU" section * of RCU's Requirements documentation.
*/
WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF); /* Exclude CPU hotplug operations. */
rcu_for_each_leaf_node(rnp) {
local_irq_disable(); /* * Serialize with CPU offline. See Requirements.rst > Hotplug CPU > * Concurrent Quiescent State Reporting for Offline CPUs.
*/
arch_spin_lock(&rcu_state.ofl_lock);
raw_spin_lock_rcu_node(rnp); if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
!rnp->wait_blkd_tasks) { /* Nothing to do on this leaf rcu_node structure. */
raw_spin_unlock_rcu_node(rnp);
arch_spin_unlock(&rcu_state.ofl_lock);
local_irq_enable(); continue;
}
/* Record old state, apply changes to ->qsmaskinit field. */
oldmask = rnp->qsmaskinit;
rnp->qsmaskinit = rnp->qsmaskinitnext;
/* If zero-ness of ->qsmaskinit changed, propagate up tree. */ if (!oldmask != !rnp->qsmaskinit) {
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.23 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.