preempt_disable(); /* * We are in an RCU-sched read-side critical section, so the writer * cannot both change sem->state from readers_fast and start checking * counters while we are here. So if we see !sem->state, we know that * the writer won't be checking until we're past the preempt_enable() * and that once the synchronize_rcu() is done, the writer will see * anything we did within this RCU-sched read-size critical section.
*/ if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count); else
__percpu_down_read(sem, false, freezable); /* Unconditional memory barrier */ /* * The preempt_enable() prevents the compiler from * bleeding the critical section out.
*/
preempt_enable();
}
/*
 * Try to acquire the reader side of @sem without blocking.
 *
 * Returns true on success (the per-cpu read count was taken and the
 * lockdep read acquisition recorded), false if a writer holds or is
 * taking the semaphore.
 */
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true, false); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
preempt_disable(); /* * Same as in percpu_down_read().
*/ if (likely(rcu_sync_is_idle(&sem->rss))) {
this_cpu_dec(*sem->read_count);
} else { /* * slowpath; reader will only ever wake a single blocked * writer.
*/
smp_mb(); /* B matches C */ /* * In other words, if they see our decrement (presumably to * aggregate zero, as that is the only time it matters) they * will also see our critical section.
*/
this_cpu_dec(*sem->read_count);
rcuwait_wake_up(&sem->writer);
}
preempt_enable();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.