// SPDX-License-Identifier: GPL-2.0
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but which enforces
 * an alternate policy, namely:
 * 1) Supports lock wait timeouts
 * 2) Write waiter has priority
 * 3) Downgrading is not supported
 *
 * Implementation notes:
 * 1) Upper half of semaphore count is a wait count (differs from rwsem
 *    in that rwsem normalizes the upper half to the wait bias)
 * 2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 */
	/*
	 * Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
count = atomic_long_add_return(adjust, &sem->count); do { if (count > 0) break; if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust)) return;
} while (1);
/*
 * Attempt to transition the 'active' part of the count from 0 -> 1,
 * i.e. to hand the lock to a single writer.
 *
 * Returns 1 if the writer now owns the lock, 0 otherwise.
 * Called with sem->wait_lock held (see down_write_failed()).
 */
static inline int writer_trylock(struct ld_semaphore *sem)
{
	/*
	 * Only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		/* Lost the race: back out the speculative active bias. */
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
			return 0;
	} while (1);
}
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	/* Writers have priority over readers (policy #2 in file header). */
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}
/* * wait for the read lock to be granted
*/ staticstruct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{ struct ldsem_waiter waiter; long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;
/* set up my own style of waitqueue */
raw_spin_lock_irq(&sem->wait_lock);
/* * Try to reverse the lock attempt but if the count has changed * so that reversing fails, check if there are no waiters, * and early-out if not
*/ do { if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
count += adjust; break;
} if (count > 0) {
raw_spin_unlock_irq(&sem->wait_lock); return sem;
}
} while (1);
/* if there are no active locks, wake the new lock owner(s) */ if ((count & LDSEM_ACTIVE_MASK) == 0)
__ldsem_wake(sem);
raw_spin_unlock_irq(&sem->wait_lock);
/* wait to be given the lock */ for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!smp_load_acquire(&waiter.task)) break; if (!timeout) break;
timeout = schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
if (!timeout) { /* * Lock timed out but check if this task was just * granted lock ownership - if so, pretend there * was no timeout; otherwise, cleanup lock wait.
*/
raw_spin_lock_irq(&sem->wait_lock); if (waiter.task) {
atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
sem->wait_readers--;
list_del(&waiter.list);
raw_spin_unlock_irq(&sem->wait_lock);
put_task_struct(waiter.task); return NULL;
}
raw_spin_unlock_irq(&sem->wait_lock);
}
return sem;
}
/* * wait for the write lock to be granted
*/ staticstruct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{ struct ldsem_waiter waiter; long adjust = -LDSEM_ACTIVE_BIAS; int locked = 0;
/* set up my own style of waitqueue */
raw_spin_lock_irq(&sem->wait_lock);
/* * Try to reverse the lock attempt but if the count has changed * so that reversing fails, check if the lock is now owned, * and early-out if so.
*/ do { if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) break; if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
raw_spin_unlock_irq(&sem->wait_lock); return sem;
}
} while (1);
list_add_tail(&waiter.list, &sem->write_wait);
waiter.task = current;
set_current_state(TASK_UNINTERRUPTIBLE); for (;;) { if (!timeout) break;
raw_spin_unlock_irq(&sem->wait_lock);
timeout = schedule_timeout(timeout);
raw_spin_lock_irq(&sem->wait_lock);
set_current_state(TASK_UNINTERRUPTIBLE);
locked = writer_trylock(sem); if (locked) break;
}
if (!locked)
atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
list_del(&waiter.list);
/* * In case of timeout, wake up every reader who gave the right of way * to writer. Prevent separation readers into two groups: * one that helds semaphore and another that sleeps. * (in case of no contention with a writer)
*/ if (!locked && list_empty(&sem->write_wait))
__ldsem_wake_readers(sem);
raw_spin_unlock_irq(&sem->wait_lock);
__set_current_state(TASK_RUNNING);
/* lock wait may have timed out */ if (!locked) return NULL; return sem;
}
staticint __ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{ long count;
/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	/* May block; forbid calling from atomic context. */
	might_sleep();

	/* Plain (subclass 0) variant of the nested down-read. */
	return __ldsem_down_read_nested(sem, 0, timeout);
}
/* * trylock for reading -- returns 1 if successful, 0 if contention
*/ int ldsem_down_read_trylock(struct ld_semaphore *sem)
{ long count = atomic_long_read(&sem->count);
/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	/* May block; forbid calling from atomic context. */
	might_sleep();

	/* Plain (subclass 0) variant of the nested down-write. */
	return __ldsem_down_write_nested(sem, 0, timeout);
}
/* * release a read lock
*/ void ldsem_up_read(struct ld_semaphore *sem)
{ long count;
/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/* Drop the write bias; a negative result means waiters are queued. */
	if (atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count) < 0)
		ldsem_wake(sem);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Down-read with an explicit lockdep subclass
 * (built only under CONFIG_DEBUG_LOCK_ALLOC).
 */
int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();

	return __ldsem_down_read_nested(sem, subclass, timeout);
}
/*
 * Down-write with an explicit lockdep subclass
 * (built only under CONFIG_DEBUG_LOCK_ALLOC).
 */
int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();

	return __ldsem_down_write_nested(sem, subclass, timeout);
}
/*
 * NOTE(review): the German text below is website-scrape residue, not part
 * of this source file; it would break compilation if left as bare text.
 * Preserved here (translated) as a comment pending removal:
 * "The information on this website was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Remark: the color
 * syntax highlighting and the measurement are still experimental."
 */