// SPDX-License-Identifier: GPL-2.0-only /* * PREEMPT_RT substitution for spin/rw_locks * * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to * resemble the non RT semantics: * * - Contrary to plain rtmutexes, spinlocks and rwlocks are state * preserving. The task state is saved before blocking on the underlying * rtmutex, and restored when the lock has been acquired. Regular wakeups * during that time are redirected to the saved state so no wake up is * missed. * * - Non RT spin/rwlocks disable preemption and eventually interrupts. * Disabling preemption has the side effect of disabling migration and * preventing RCU grace periods. * * The RT substitutions explicitly disable migration and take * rcu_read_lock() across the lock held section.
*/ #include <linux/spinlock.h> #include <linux/export.h>
/* * __might_resched() skips the state check as rtlocks are state * preserving. Take RCU nesting into account as spin/read/write_lock() can * legitimately nest into an RCU read side critical section.
*/ #define RTLOCK_RESCHED_OFFSETS \
	(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT) /* current RCU nesting depth, shifted into the RCU field of the might-resched offset */
if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);
/* * Wait for the lock to get unlocked: instead of polling for an unlock * (like raw spinlocks do), lock and unlock, to force the kernel to * schedule if there's contention:
*/ void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	/*
	 * Acquire then immediately release. If @lock is contended, the
	 * acquire blocks on the underlying rtmutex and schedules, rather
	 * than spin-polling for the owner to drop the lock.
	 */
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);
static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{ int ret = 1;
if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
ret = rt_mutex_slowtrylock(&lock->lock);
The information on this website has been carefully compiled to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.