/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
{
	/*
	 * In order to read the offloaded state of an rdp in a safe
	 * and stable way and prevent from its value to be changed
	 * under us, we must either hold the barrier mutex, the cpu
	 * hotplug lock (read or write) or the nocb lock. Local
	 * non-preemptible reads are also safe. NOCB kthreads and
	 * timers have their own means of synchronization against the
	 * offloaded state updaters.
	 */
	RCU_NOCB_LOCKDEP_WARN(
		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
		  lockdep_is_held(&rdp->nocb_lock) ||
		  lockdep_is_held(&rcu_state.nocb_mutex) ||
		  ((!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) || softirq_count()) &&
		   rdp == this_cpu_ptr(&rcu_data)) ||
		  rcu_current_is_nocb_kthread(rdp)),
		"Unsafe read of RCU_NOCB offloaded state"
	);

	return rcu_segcblist_is_offloaded(&rdp->cblist);
}

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (qovld != DEFAULT_RCU_QOVLD)
		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug is enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (nohz_full_patience_delay < 0) {
		pr_info("\tRCU NOCB CPU patience negative (%d), resetting to zero.\n", nohz_full_patience_delay);
		nohz_full_patience_delay = 0;
	} else if (nohz_full_patience_delay > 5 * MSEC_PER_SEC) {
		pr_info("\tRCU NOCB CPU patience too large (%d), resetting to %ld.\n", nohz_full_patience_delay, 5 * MSEC_PER_SEC);
		nohz_full_patience_delay = 5 * MSEC_PER_SEC;
	} else if (nohz_full_patience_delay) {
		pr_info("\tRCU NOCB CPU patience set to %d milliseconds.\n", nohz_full_patience_delay);
	}
	nohz_full_patience_delay_jiffies = msecs_to_jiffies(nohz_full_patience_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

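/*
 * Illustrative sketch, not part of the kernel sources: the
 * nohz_full_patience_delay clamping above maps whatever value reached
 * this function into the range [0, 5 * MSEC_PER_SEC] milliseconds
 * before converting it to jiffies, e.g.:
 *
 *	-3   -> warned about, reset to 0
 *	7000 -> warned about, clamped to 5 * MSEC_PER_SEC (5000)
 *	200  -> accepted, nohz_full_patience_delay_jiffies = msecs_to_jiffies(200)
 */
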
#ifdef CONFIG_PREEMPT_RCU

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods. The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none). If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements. In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period. Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections. At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves rrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task. In theory,
	 * this could be an if-statement. In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case RCU_EXP_TASKS:
	case RCU_EXP_TASKS | RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS | RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_BLKD:
	case RCU_GP_BLKD:
	case RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting. Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case RCU_EXP_TASKS | RCU_EXP_BLKD:
	case RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP. Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS | RCU_GP_BLKD:
	case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP. Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task. If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP. This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP. (Though we are
	 * still in a quiescent state in any case.)
	 *
	 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change.
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
}

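/*
 * Illustrative sketch, not part of the kernel sources: one concrete
 * decoding of blkd_state for the function above. Suppose a task is
 * preempted on a CPU whose bit is still set in ->qsmask (the normal GP
 * is waiting on this CPU) but clear in ->expmask, and no earlier task
 * is queued on ->blkd_tasks:
 *
 *	blkd_state == RCU_GP_BLKD
 *
 * so the switch takes the list_add_tail() arm, and because
 * !rnp->gp_tasks && (blkd_state & RCU_GP_BLKD) holds afterwards,
 * ->gp_tasks is pointed at this task's ->rcu_node_entry: the normal
 * grace period now waits on it, while any expedited grace period is
 * unaffected.
 */
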
/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state: Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU. It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Unlike non-preemptible-RCU, quiescent state reports for expedited
 * grace periods are handled separately via deferred quiescent states
 * and context switch events.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from. If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section. Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	if (rcu_preempt_depth() > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing and expedited grace periods.
		 */
		WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally. Either way, we can now note a quiescent state
	 * for this CPU. Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure. If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->gp_tasks) != NULL;
}

/* limit value for ->rcu_read_lock_nesting. */
#define RCU_NEST_PMAX (INT_MAX / 2)

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	rcu_preempt_read_enter();
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

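/*
 * Illustrative sketch, not part of the kernel sources: callers normally
 * reach __rcu_read_lock()/__rcu_read_unlock() through the rcu_read_lock()
 * and rcu_read_unlock() wrappers, and nesting simply bumps the per-task
 * counter that rcu_preempt_depth() reports ('gp' below is a made-up
 * example pointer):
 *
 *	rcu_read_lock();		// depth 1
 *	p = rcu_dereference(gp);
 *	rcu_read_lock();		// depth 2, nesting is cheap
 *	rcu_read_unlock();		// depth 1
 *	rcu_read_unlock();		// depth 0, special work may run here
 *
 * Only the outermost rcu_read_unlock() can end up in
 * rcu_read_unlock_special(), and only if ->rcu_read_unlock_special.s
 * became non-zero while the critical section was running.
 */
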
/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  // critical section before exit code.
	if (rcu_preempt_read_exit() == 0) {
		barrier();  // critical-section exit before .s check.
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		int rrln = rcu_preempt_depth();

		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states. The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static notrace void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	rdp = this_cpu_ptr(&rcu_data);
	if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING)
		rdp->defer_qs_iw_pending = DEFER_QS_IDLE;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited. Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	if (!special.s && !rdp->cpu_no_qs.b.exp) {
		local_irq_restore(flags);
		return;
	}
	t->rcu_read_unlock_special.s = 0;
	if (special.b.need_qs) {
		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
			rdp->cpu_no_qs.b.norm = false;
			rcu_report_qs_rdp(rdp);
			udelay(rcu_unlock_delay);
		} else {
			rcu_qs();
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU. Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->cpu_no_qs.b.exp)
		rcu_report_exp_rdp(rdp);

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {

		/*
		 * Remove this task from the list it blocked on. The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_exp_done(rnp);
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			WRITE_ONCE(rnp->gp_tasks, np);
		if (&t->rcu_node_entry == rnp->exp_tasks)
			WRITE_ONCE(rnp->exp_tasks, np);
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				WRITE_ONCE(rnp->boost_tasks, np);
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section? It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states. The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled. This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       rcu_preempt_depth() == 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section. The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

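/*
 * Illustrative sketch, not part of the kernel sources: the typical
 * deferred-QS sequence that the functions above implement. Assume a
 * preempted reader ends its critical section while BH is disabled:
 *
 *	rcu_read_lock();
 *	...			// task is preempted, queued on ->blkd_tasks
 *	local_bh_disable();
 *	rcu_read_unlock();	// cannot safely report a QS right now;
 *				// rcu_read_unlock_special() only asks for a
 *				// reschedule, softirq, or irq_work instead
 *	local_bh_enable();
 *	...			// later, from a safe context (for example
 *				// the next context switch or scheduler tick),
 *				// rcu_preempt_deferred_qs() sees the pending
 *				// special flags and calls
 *				// rcu_preempt_deferred_qs_irqrestore()
 */
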
/*
 * Minimal handler to give the scheduler a chance to re-evaluate.
 */
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
	unsigned long flags;
	struct rcu_data *rdp;

	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
	local_irq_save(flags);

	/*
	 * If the IRQ work handler happens to run in the middle of RCU read-side
	 * critical section, it could be ineffective in getting the scheduler's
	 * attention to report a deferred quiescent state (the whole point of the
	 * IRQ work). For this reason, requeue the IRQ work.
	 *
	 * Basically, we want to avoid following situation:
	 * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
	 * 2. CPU enters new rcu_read_lock()
	 * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
	 * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
	 * 5. Deferred QS reporting does not happen.
	 */
	if (rcu_preempt_depth() > 0)
		WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);

	local_irq_restore(flags);
}

/*
 * Check if expedited grace period processing during unlock is needed.
 *
 * This function determines whether expedited handling is required based on:
 * 1. Task blocking an expedited grace period (based on a heuristic, could be
 *    false-positive, see below.)
 * 2. CPU participating in an expedited grace period
 * 3. Strict grace period mode requiring expedited handling
 * 4. RCU priority deboosting needs when interrupts were disabled
 *
 * @t: The task being checked
 * @rdp: The per-CPU RCU data
 * @rnp: The RCU node for this CPU
 * @irqs_were_disabled: Whether interrupts were disabled before rcu_read_unlock()
 *
 * Returns true if expedited processing of the rcu_read_unlock() is needed.
 */
static bool rcu_unlock_needs_exp_handling(struct task_struct *t,
					  struct rcu_data *rdp,
					  struct rcu_node *rnp,
					  bool irqs_were_disabled)
{
	/*
	 * Check if this task is blocking an expedited grace period. If the
	 * task was preempted within an RCU read-side critical section and is
	 * on the expedited grace period blockers list (exp_tasks), we need
	 * expedited handling to unblock the expedited GP. This is not an exact
	 * check because 't' might not be on the exp_tasks list at all - it's
	 * just a fast heuristic that can be false-positive sometimes.
	 */
	if (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks))
		return true;

	/*
	 * Check if this CPU is participating in an expedited grace period.
	 * The expmask bitmap tracks which CPUs need to check in for the
	 * current expedited GP. If our CPU's bit is set, we need expedited
	 * handling to help complete the expedited GP.
	 */
	if (rdp->grpmask & READ_ONCE(rnp->expmask))
		return true;

	/*
	 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, all grace periods
	 * are treated as short for testing purposes even if that means
	 * disturbing the system more. Check if either:
	 * - This CPU has not yet reported a quiescent state, or
	 * - This task was preempted within an RCU critical section
	 * In either case, require expedited handling for strict GP mode.
	 */
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
	    ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node))
		return true;

	/*
	 * RCU priority boosting case: If a task is subject to RCU priority
	 * boosting and exits an RCU read-side critical section with interrupts
	 * disabled, we need expedited handling to ensure timely deboosting.
	 * Without this, a low-priority task could incorrectly run at high
	 * real-time priority for an extended period degrading real-time
	 * responsiveness. This applies to all CONFIG_RCU_BOOST=y kernels,
	 * not just to PREEMPT_RT.
	 */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
	    t->rcu_blocked_node)
		return true;

	return false;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	unsigned long flags;
	bool irqs_were_disabled;
	bool preempt_bh_were_disabled =
			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);
	irqs_were_disabled = irqs_disabled_flags(flags);
	if (preempt_bh_were_disabled || irqs_were_disabled) {
		bool needs_exp;	// Expedited handling needed.
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
		struct rcu_node *rnp = rdp->mynode;

		needs_exp = rcu_unlock_needs_exp_handling(t, rdp, rnp, irqs_were_disabled);

		// Need to defer quiescent state until everything is enabled.
		if (use_softirq && (in_hardirq() || (needs_exp && !irqs_were_disabled))) {
			// Using softirq, safe to awaken, and either the
			// wakeup is free or there is either an expedited
			// GP in flight or a potential need to deboost.
			raise_softirq_irqoff(RCU_SOFTIRQ);
		} else {
			// Enabling BH or preempt does reschedule, so...
			// Also if no expediting and no possible deboosting,
			// slow is OK. Plus nohz_full CPUs eventually get
			// tick enabled.
			set_tsk_need_resched(current);
			set_preempt_need_resched();
			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
			    needs_exp && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
			    cpu_online(rdp->cpu)) {
				// Get scheduler to re-evaluate and call hooks.
				// If !IRQ_WORK, FQS scan will eventually IPI.
				rdp->defer_qs_iw_pending = DEFER_QS_PENDING;
				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
			}
		}
		local_irq_restore(flags);
		return;
	}
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty. It is a serious bug to complete a grace
 * period that still has RCU readers blocked! This function must be
 * invoked -before- updating this rnp's ->gp_seq.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	raw_lockdep_assert_held_rcu_node(rnp);
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU. When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	struct task_struct *t = current;

	lockdep_assert_irqs_disabled();
	if (rcu_preempt_depth() > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (rcu_preempt_depth() > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so. No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 * Besides, if this function does anything other than just immediately
 * return, there was a bug of some sort. Spewing warnings from this
 * function is like as not to simply obscure important prior warnings.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	int cpu;
	int i;
	struct list_head *lhp;
	struct rcu_data *rdp;
	struct rcu_node *rnp1;

/*
 * If strict grace periods are enabled, and if the calling
 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
 * report that quiescent state and, if requested, spin for a bit.
 */
void rcu_read_unlock_strict(void)
{
	struct rcu_data *rdp;

	if (irqs_disabled() || in_atomic_preempt_off() || !rcu_state.gp_kthread)
		return;

	/*
	 * rcu_report_qs_rdp() can only be invoked with a stable rdp and
	 * from the local CPU.
	 *
	 * The in_atomic_preempt_off() check ensures that we come here holding
	 * the last preempt_count (which will get dropped once we return to
	 * __rcu_read_unlock()).
	 */
	rdp = this_cpu_ptr(&rcu_data);
	rdp->cpu_no_qs.b.norm = false;
	rcu_report_qs_rdp(rdp);
	udelay(rcu_unlock_delay);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Note a quiescent state for PREEMPTION=n. Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a flag. The caller must
 * have disabled preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
	if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

/*
 * Register an urgently needed quiescent state. If there is an
 * emergency, invoke rcu_momentary_eqs() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, which will in
 * some cases serve for expedited as well as normal grace periods.
 * Either way, register a lightweight quiescent state.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
		return;
	preempt_disable();  // For CONFIG_PREEMPT_COUNT=y kernels
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_eqs();
		local_irq_restore(flags);
	}
	rcu_qs();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

/*
 * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_qs();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
		rcu_momentary_eqs();
out:
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because there is no preemptible RCU, there can be no deferred quiescent
 * states.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}

// Except that we do need to respond to a request by an expedited
// grace period for a quiescent state from this CPU. Note that in
// non-preemptible kernels, there can be no context switches within RCU
// read-side critical sections, which in turn means that the leaf rcu_node
// structure's blocked-tasks list is always empty. There is therefore no
// need to actually check it. Instead, a quiescent state from this CPU
// suffices, and this function is only called from such a quiescent state.
notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	if (READ_ONCE(rdp->cpu_no_qs.b.exp))
		rcu_report_exp_rdp(rdp);
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks. So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state,
 * namely user mode and idle loop.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) &&
	     (preempt_count() == HARDIRQ_OFFSET))) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode, from the idle loop without this being a nested
		 * interrupt, or while not holding the task preempt count
		 * (with PREEMPT_COUNT=y). In this case, the CPU is in a
		 * quiescent state, so note it.
		 *
		 * No memory barrier is required here because rcu_qs()
		 * references only CPU-local variables that other CPUs
		 * neither access nor modify, at least not while the
		 * corresponding CPU is online.
		 */
		rcu_qs();
	}
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

/*
 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(struct rcu_data *rdp)
{
	return rdp->rcu_cpu_kthread_task == current ||
	       rcu_is_callbacks_nocb_kthread(rdp);
}

#ifdef CONFIG_RCU_BOOST

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL)
		tb = rnp->exp_tasks;
	else
		tb = rnp->boost_tasks;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t. We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section. Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority. (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else. We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock. Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
	rnp->n_boosts++;

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

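/*
 * Illustrative sketch, not part of the kernel sources: the proxy-lock
 * trick used by rcu_boost() above, from the point of view of the two
 * tasks involved (the priorities are arbitrary example values and "mtx"
 * stands for the rcu_node's ->boost_mtx):
 *
 *	boost kthread (RT prio)			preempted reader (normal prio)
 *	-----------------------			------------------------------
 *	rt_mutex_init_proxy_locked(mtx, t)	// reader t now "owns" mtx
 *	rt_mutex_lock(mtx)			// blocks; PI boosts t to RT prio
 *						t finishes its read-side section,
 *						rt_mutex_futex_unlock(mtx) in
 *						rcu_preempt_deferred_qs_irqrestore()
 *	rt_mutex_lock(mtx) returns		// t is deboosted again
 *	rt_mutex_unlock(mtx)			// keep lockdep happy
 */
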
/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them. If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (!rnp->boost_kthread_task ||
	    (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
	      IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)))) {
		if (rnp->exp_tasks == NULL)
			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rcu_wake_cond(rnp->boost_kthread_task,
			      READ_ONCE(rnp->boost_kthread_status));
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist. We only create this kthread for preemptible RCU.
 */
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - rcu_get_root();
	struct sched_param sp;
	struct task_struct *t;

	if (rnp->boost_kthread_task)
		return;

	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (WARN_ON_ONCE(IS_ERR(t)))
		return;

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress() ||
	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the RCU grace-period kthreads to the housekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	if (!tick_nohz_full_enabled())
		return;
	housekeeping_affine(current, HK_TYPE_RCU);
}