/*
 * Tell context tracking that this CPU is about to enter user mode.
 * No-op when context tracking is compiled out / runtime-disabled.
 * NOTE(review): the non-irqoff variants presumably may be called with
 * interrupts enabled (ct_user_enter/ct_user_exit handle irq state) —
 * confirm against the ct_user_enter() definition.
 */
static inline void user_enter(void)
{
	if (context_tracking_enabled())
		ct_user_enter(CT_STATE_USER);
}

/*
 * Tell context tracking that this CPU is returning from user mode
 * to kernel mode. Counterpart of user_enter().
 */
static inline void user_exit(void)
{
	if (context_tracking_enabled())
		ct_user_exit(CT_STATE_USER);
}
/*
 * Variants of user_enter()/user_exit() for callers that already run
 * with interrupts disabled; they go straight to the double-underscore
 * helpers. __always_inline keeps them out of traceable call paths.
 */

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (!context_tracking_enabled())
		return;

	__ct_user_enter(CT_STATE_USER);
}

/* Called with interrupts disabled. */
static __always_inline void user_exit_irqoff(void)
{
	if (!context_tracking_enabled())
		return;

	__ct_user_exit(CT_STATE_USER);
}
/*
 * Is RCU watching the current CPU (IOW, it is not in an extended
 * quiescent state)?
 *
 * Note that this returns the actual boolean data (watching / not
 * watching), whereas ct_rcu_watching() returns the RCU_WATCHING
 * subvariable of context_tracking.state.
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_is_watching_curr_cpu(void)
{
	/* Plain (unordered) read of this CPU's state word; test the flag bit. */
	return (raw_atomic_read(this_cpu_ptr(&context_tracking.state)) &
		CT_RCU_WATCHING) != 0;
}
/* * Increment the current CPU's context_tracking structure's ->state field * with ordering. Return the new value.
*/ static __always_inline unsignedlong ct_state_inc(int incby)
{ return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
/*
 * Make a WARN()-style report safe when RCU is not watching this CPU:
 * force the RCU_WATCHING bit on so that the (RCU-using) reporting
 * machinery does not itself trip "RCU isn't watching" warnings.
 *
 * Returns true if the state had to be forced, false if RCU was already
 * watching. Leaves preemption disabled in both cases; NOTE(review): a
 * matching exit helper (not visible in this chunk) presumably re-enables
 * preemption and undoes the forced increment — confirm at call sites.
 */
static __always_inline bool warn_rcu_enter(void)
{
	bool ret = false;

	/*
	 * Horrible hack to shut up recursive RCU isn't watching fail since
	 * lots of the actual reporting also relies on RCU.
	 */
	preempt_disable_notrace();
	if (!rcu_is_watching_curr_cpu()) {
		ret = true;
		ct_state_inc(CT_RCU_WATCHING);
	}

	return ret;
}