/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}
/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}
/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}
/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}
/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
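
/*
 * The two helpers above implement the usual sequence-counter polling
 * idiom: snapshot first, then test the snapshot for completion.  The
 * sketch below is illustrative only and not part of this file; the
 * caller name is hypothetical.
 */
#if 0
static void example_poll_exp_gp(void)
{
	unsigned long s;

	s = rcu_exp_gp_seq_snap();	/* Earliest "done" value. */
	/* ... perform the update that readers must not miss ... */
	while (!rcu_exp_gp_seq_done(s))	/* Poll for a full expedited GP. */
		schedule_timeout_uninterruptible(1);
}
#endif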
/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}
/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		/*
		 * Need to wait for any blocked tasks as well.  Note that
		 * additional blocking tasks will also block the expedited GP
		 * until such time as the ->expmask bits are cleared.
		 */
		if (rcu_is_leaf_node(rnp) && rcu_preempt_has_tasks(rnp))
			WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}
/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ret;
}
/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}
/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}
/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure, which is acquired by the caller.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp, unsigned long flags,
				    unsigned long mask_in, bool wake)
	__releases(rnp->lock)
{
	int cpu;
	unsigned long mask;
	struct rcu_data *rdp;

	raw_lockdep_assert_held_rcu_node(rnp);
	if (!(rnp->expmask & mask_in)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	mask = mask_in & rnp->expmask;
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}
/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		/*
		 * Order GP completion with preceding accesses.  Order also GP
		 * completion with post GP update side accesses.  Pairs with
		 * rcu_seq_end().
		 */
		smp_mb();
		return true;
	}
	return false;
}
/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi,
					  TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			/*
			 * Full ordering between remote CPU's post idle accesses
			 * and updater's accesses prior to current GP (and also
			 * the started GP sequence number) is enforced by
			 * rcu_seq_start() implicit barrier, relayed by kworkers
			 * locking and even further by smp_mb__after_unlock_lock()
			 * barriers chained all the way throughout the rnp locking
			 * tree since sync_exp_reset_tree() and up to the current
			 * leaf rnp locking.
			 *
			 * Ordering between remote CPU's pre idle accesses and
			 * post grace period updater's accesses is enforced by the
			 * below acquire semantic.
			 */
			snap = ct_rcu_watching_cpu_acquire(cpu);
			if (rcu_watching_snap_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_watching_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_watching_snap_stopped_since(rdp, rdp->exp_watching_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}

	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		rcu_report_exp_cpu_mult(rnp, flags, mask_ofl_test, false);
	}
}
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use rcu_exp_par_gp_kworker, because flushing a work item from
	 * another work item on the same kthread worker can result in
	 * deadlock.
	 */
	kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}
/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	struct rcu_node *rnp;

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */

		if (!rcu_exp_par_worker_started(rnp) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No worker started yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		sync_rcu_exp_select_cpus_queue_work(rnp);
		rnp->exp_need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			sync_rcu_exp_select_cpus_flush_work(rnp);
}
/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0);  /* workqueues should not be signaled. */
	return false;
}
/*
 * Print out an expedited RCU CPU stall warning message.
 */
static void synchronize_rcu_expedited_stall(unsigned long jiffies_start, unsigned long j)
{
	int cpu;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	if (READ_ONCE(csd_lock_suppress_rcu_stall) && csd_lock_is_stuck()) {
		pr_err("INFO: %s detected expedited stalls, but suppressed full report due to a stuck CSD-lock.\n", rcu_state.name);
		return;
	}
	pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {", rcu_state.name);
	ndetected = 0;
	rcu_for_each_leaf_node(rnp) {
		ndetected += rcu_print_task_exp_stall(rnp);
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			struct rcu_data *rdp;
*/ staticvoid synchronize_rcu_expedited_wait(void)
{ int cpu; unsignedlong j; unsignedlong jiffies_stall; unsignedlong jiffies_start; unsignedlong mask; struct rcu_data *rdp; struct rcu_node *rnp; unsignedlong flags;
/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}
/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}
/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	lockdep_assert_irqs_disabled();
	ASSERT_EXCLUSIVE_WRITER_SCOPED(*this_cpu_ptr(&rcu_data.cpu_no_qs.b.exp));
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}
#ifdef CONFIG_PREEMPT_RCU
/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * WARN if the CPU is unexpectedly already looking for a
	 * QS or has already reported one.
	 */
	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
	if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
			 READ_ONCE(rdp->cpu_no_qs.b.exp)))
		return;

	/*
	 * Second, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_is_cpu_rrupt_from_idle())
			rcu_report_exp_rdp(rdp);
		else
			rcu_exp_need_qs();
		return;
	}

	/*
	 * Third, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Fourth and finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!READ_ONCE(rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, dumping the stack of each that is blocking the current
 * expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	if (!rcu_exp_stall_task_details)
		return;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!READ_ONCE(rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
#else /* #ifdef CONFIG_PREEMPT_RCU */
/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	bool preempt_bh_enabled = !(preempt_count() &
				    (PREEMPT_MASK | SOFTIRQ_MASK));

	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}
/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}
/*
 * Because preemptible RCU does not exist, we never have to print out
 * tasks blocked within RCU read-side critical sections that are blocking
 * the current expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and
 * is thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

		local_irq_save(flags);
		WARN_ON_ONCE(num_online_cpus() > 1);
		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
		local_irq_restore(flags);
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		synchronize_rcu_normal();
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely((rcu_scheduler_active == RCU_SCHEDULER_INIT) ||
		     !rcu_exp_worker_started())) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
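
/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * hypothetical writer unlinks an element from an RCU-protected list and
 * waits for an expedited grace period before freeing it.  The struct
 * and function names below are invented for the example.
 */
#if 0
struct demo_node {
	struct list_head list;
	int data;
};

static void demo_remove(struct demo_node *p)
{
	list_del_rcu(&p->list);		/* Unlink; readers may still hold p. */
	synchronize_rcu_expedited();	/* All pre-existing readers are done. */
	kfree(p);			/* Now safe to free. */
}
#endif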
/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n",
				__func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}
/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		if (rcu_init_invoked()) {
			rnp->exp_seq_poll_rq = s;
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
		}
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);
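
/*
 * Hedged usage sketch (illustrative, not from this file): start an
 * expedited grace period without blocking, stash the cookie, and check
 * it later.  The function and variable names are hypothetical.
 */
#if 0
static unsigned long demo_cookie;

static void demo_start(void)
{
	demo_cookie = start_poll_synchronize_rcu_expedited();
}

static bool demo_gp_elapsed(void)
{
	/* True once a full grace period has elapsed since demo_start(). */
	return poll_state_synchronize_rcu(demo_cookie);
}
#endif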
/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	get_state_synchronize_rcu_full(rgosp);
	(void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);
/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
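
/*
 * Hedged usage sketch (illustrative, not from this file): snapshot the
 * grace-period state before a slow operation, then wait only if no full
 * grace period elapsed in the meantime.  demo_update() is hypothetical.
 */
#if 0
static void demo_update(void)
{
	unsigned long oldstate = get_state_synchronize_rcu();

	/* ... slow work that may well span a grace period ... */
	cond_synchronize_rcu_expedited(oldstate);	/* Often a no-op. */
}
#endif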
/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);