/* * Check to see if the scheduling-clock interrupt came from an extended * quiescent state, and, if so, tell RCU about it. This function must * be called from hardirq context. It is normally called from the * scheduling-clock interrupt.
*/ void rcu_sched_clock_irq(int user)
{ if (user) {
rcu_qs();
} elseif (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
set_tsk_need_resched(current);
set_preempt_need_resched();
}
}
/* * Reclaim the specified callback, either by invoking it for non-kfree cases or * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
*/ staticinlinebool rcu_reclaim_tiny(struct rcu_head *head)
{
rcu_callback_t f;
/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(void)
{
	struct rcu_head *next, *list;
	unsigned long flags;	/* fixed fused token "unsignedlong" */

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	/* If every queued callback was ready, the "current" tail now points
	 * into the detached list — reset both tails to the (empty) head. */
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		rcu_reclaim_tiny(list);
		list = next;
	}
}
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	/* Advance gp_seq by 2 so the polled grace-period APIs observe that a
	 * grace period has elapsed; preemption is disabled around the update
	 * so the read-modify-write is not interleaved with a preempting
	 * updater on this CPU. */
	preempt_disable();
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 *
 * NOTE(review): the source seen here declared @flags and never used
 * @func or enqueued @head — the callback could never be invoked.  The
 * initialization/enqueue sequence has been restored.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	static atomic_t doublefrees;
	unsigned long flags;	/* fixed fused token "unsignedlong" */

	if (debug_rcu_head_queue(head)) {
		/* Probable double call_rcu(): complain (rate-limited to the
		 * first few occurrences) and refuse to enqueue. */
		if (atomic_inc_return(&doublefrees) < 4) {
			pr_err("%s(): Double-freed CB %p->%pS()!!!\n", __func__, head, head->func);
			mem_dump_obj(head);
		}
		return;
	}

	/* Initialize the callback and append it to the callback list with
	 * interrupts disabled, since the list is also manipulated from
	 * rcu_process_callbacks() in (soft)irq context. */
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
/*
 * Store a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	/* Mark the cookie as "already completed" so that any later
	 * poll on it reports that the grace period has elapsed. */
	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);
/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)	/* fixed "unsignedlong" */
{
	/* READ_ONCE() pairs with the WRITE_ONCE() in synchronize_rcu(). */
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)	/* fixed "unsignedlong" */
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
/* * Return true if the grace period corresponding to oldstate has completed * and false otherwise. For more information, see the Tree RCU header * comment.
*/ bool poll_state_synchronize_rcu(unsignedlong oldstate)
{ return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
/*
 * NOTE(review): the following German disclaimer is residue from the web page
 * this file was scraped from, not part of the source.  Fenced in a comment
 * (and translated) so the file remains valid C:
 *
 * "The information on this web page was compiled carefully and to the best of
 *  our knowledge.  However, neither completeness nor correctness nor quality
 *  of the information provided is guaranteed.
 *  Note: the colored syntax highlighting and the measurement are still
 *  experimental."
 */