// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/plist.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>
#include <linux/prctl.h>
#include <linux/mempolicy.h>
#include <linux/mmap_lock.h>
/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	unsigned long		hashmask;
	unsigned int		hashshift;
	struct futex_hash_bucket *queues[MAX_NUMNODES];
} __futex_data __read_mostly __aligned(2*sizeof(long));

#define futex_hashmask	(__futex_data.hashmask)
#define futex_hashshift	(__futex_data.hashshift)
#define futex_queues	(__futex_data.queues)
static inline bool futex_key_is_private(union futex_key *key)
{
	/*
	 * Relies on get_futex_key() to set either bit for shared
	 * futexes -- see comment with union futex_key.
	 */
	return !(key->both.offset & (FUT_OFF_INODE | FUT_OFF_MMSHARED));
}
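/*
 * Illustrative note (bit values from futex.h): a futex word must be at
 * least 4-byte aligned, so the low two bits of key->both.offset are free
 * and encode the key type:
 *
 *	#define FUT_OFF_INODE		1	// key has a reference on inode
 *	#define FUT_OFF_MMSHARED	2	// key has a reference on mm
 *
 * Neither bit set means PROCESS_PRIVATE, which is what the check above
 * tests.
 */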
void futex_private_hash_put(struct futex_private_hash *fph)
{
	if (futex_ref_put(fph))
		wake_up_var(fph->mm);
}
/**
 * futex_hash_get - Get an additional reference for the local hash
 * @hb:	ptr to the private local hash.
 *
 * Obtain an additional reference for the already obtained hash bucket. The
 * caller must already own a reference.
 */
void futex_hash_get(struct futex_hash_bucket *hb)
{
	struct futex_private_hash *fph = hb->priv;

	if (!fph)
		return;
	WARN_ON_ONCE(!futex_private_hash_get(fph));
}
			hb_new = __futex_hash(&this->key, new);
			futex_hb_waiters_inc(hb_new);
			/*
			 * The new pointer isn't published yet but an already
			 * moved user can be unqueued due to timeout or signal.
			 */
			spin_lock_nested(&hb_new->lock, SINGLE_DEPTH_NESTING);
			plist_add(&this->list, &hb_new->chain);
			this->lock_ptr = &hb_new->lock;
			spin_unlock(&hb_new->lock);
		}
		spin_unlock(&hb_old->lock);
	}
}
struct futex_private_hash *futex_private_hash(void)
{
	struct mm_struct *mm = current->mm;
	/*
	 * Ideally we don't loop. If there is a replacement in progress
	 * then a new private hash is already prepared and a reference can't
	 * be obtained once the last user dropped its reference.
	 * In that case we block on mm_struct::futex_hash_lock and either
	 * have to perform the replacement or wait while someone else is
	 * doing the job. Either way, on the second iteration we acquire a
	 * reference on the new private hash or loop again because a new
	 * replacement has been requested.
	 */
again:
	scoped_guard(rcu) {
		struct futex_private_hash *fph;

		fph = rcu_dereference(mm->futex_phash);
		if (!fph)
			return NULL;

		if (futex_private_hash_get(fph))
			return fph;
	}
	futex_pivot_hash(mm);
	goto again;
}
/**
 * __futex_hash - Return the hash bucket
 * @key:	Pointer to the futex key for which the hash is calculated
 * @fph:	Pointer to private hash if known
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket.
 * If the FUTEX is PROCESS_PRIVATE then a per-process hash bucket (from the
 * private hash) is returned if it exists. Otherwise a hash bucket from the
 * global hash is returned.
 */
static struct futex_hash_bucket *
__futex_hash(union futex_key *key, struct futex_private_hash *fph)
{
	int node = key->both.node;
	u32 hash;

	if (node == FUTEX_NO_NODE) {
		struct futex_hash_bucket *hb;

		hb = __futex_hash_private(key, fph);
		if (hb)
			return hb;
	}
	hash = jhash2((u32 *)key,
		      offsetof(typeof(*key), both.offset) / 4,
		      key->both.offset);

	if (node == FUTEX_NO_NODE) {
		/*
		 * In case of !FLAGS_NUMA, use some unused hash bits to pick a
		 * node -- this ensures regular futexes are interleaved across
		 * the nodes and avoids having to allocate multiple
		 * hash-tables.
		 *
		 * NOTE: this isn't perfectly uniform, but it is fast and
		 * handles sparse node masks.
		 */
		node = (hash >> futex_hashshift) % nr_node_ids;
		if (!node_possible(node)) {
			node = find_next_bit_wrap(node_possible_map.bits,
						  nr_node_ids, node);
		}
	}

	return &futex_queues[node][hash & futex_hashmask];
}
/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_setup_sleeper_on_stack(timeout,
				       (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC,
				       HRTIMER_MODE_ABS);
	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}
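/*
 * Usage sketch (illustrative, mirroring how the wait paths pair this with
 * the hrtimer_sleeper API; not a new helper from this file):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags, 0);
 *	if (to)
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *	// ... block until woken or the timer fires ...
 *	if (to) {
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */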
/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that futex_match() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that
 * false-negatives are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
	static atomic64_t i_seq;
	u64 old;

	/* Does the inode already have a sequence number? */
	old = atomic64_read(&inode->i_sequence);
	if (likely(old))
		return old;

	for (;;) {
		u64 new = atomic64_inc_return(&i_seq);
		if (WARN_ON_ONCE(!new))
			continue;

		old = 0;
		if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new))
			return old;
		return new;
	}
}
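/*
 * Back-of-the-envelope check of the "almost 585 years" above (illustrative):
 * 2^64 ns ~= 1.8447e19 ns and one year ~= 3.156e16 ns (365.25 days), so a
 * 1ns-resolution u64 counter wraps only after roughly
 * 1.8447e19 / 3.156e16 ~= 584.5 years of continuous counting.
 */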
/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @flags:	FLAGS_*
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (FLAGS_SHARED), the key is:
 *
 *   ( inode->i_sequence, page offset within mapping, offset_within_page )
 *
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !FLAGS_SHARED), the key is:
 *
 *	( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
		  enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	struct folio *folio;
	struct address_space *mapping;
	int node, err, size, ro = 0;
	bool node_updated = false;
	bool fshared;

	if (node == FUTEX_NO_NODE) {
		node = numa_node_id();
		node_updated = true;
	}
	if (node_updated && futex_put_value(node, naddr))
		return -EFAULT;
key->both.node = node;
	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		/*
		 * On no-MMU, shared futexes are treated as private, therefore
		 * we must not include the current process in the key. Since
		 * there is only one address space, the address is a unique key
		 * on its own.
		 */
		if (IS_ENABLED(CONFIG_MMU))
			key->private.mm = mm;
		else
			key->private.mm = NULL;

		key->private.address = address;
		return 0;
	}
again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (e.g. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;
	/*
	 * The treatment of mapping from this point on is critical. The folio
	 * lock protects many things but in this context the folio lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the folio lock is not needed in all cases being
	 * considered here and the folio lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * folio lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the folio so it is looked up now. For
	 * anonymous pages, it does not matter if the folio is split
	 * in the future as the key is based on the address. For
	 * filesystem-backed pages, the precise page is required as the
	 * index of the page determines the key.
	 */
	folio = page_folio(page);
	mapping = READ_ONCE(folio->mapping);
	/*
	 * If folio->mapping is NULL, then it cannot be an anonymous
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the folio lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_inode_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for folio->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Folio lock is required to identify which special case above
		 * applies. If this is really a shmem page then the folio lock
		 * will prevent unexpected transitions.
		 */
		folio_lock(folio);
		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
		folio_unlock(folio);
		folio_put(folio);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}
	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored in anonymous memory, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (folio_test_anon(folio)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(true)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the folio->mapping must be traversed. Ordinarily this should
		 * be stabilised under folio lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update i_pages or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(folio->mapping) != mapping) {
			rcu_read_unlock();
			folio_put(folio);

			goto again;
		}
		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			folio_put(folio);

			goto again;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.i_seq = get_inode_sequence_number(inode);
		key->shared.pgoff = folio->index + folio_page_idx(folio, page);
		rcu_read_unlock();
	}

out:
	folio_put(folio);
	return err;
}
/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we might as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_read_lock(mm);
	ret = fixup_user_fault(mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	mmap_read_unlock(mm);

	return ret < 0 ? ret : 0;
}
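/*
 * Typical caller pattern (illustrative sketch; compare the use in
 * handle_futex_death() below): an atomic user-space access fails with
 * -EFAULT inside a pagefault-disabled region, so the caller drops its
 * locks, faults the page in writable and retries:
 *
 *	retry:
 *		err = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval);
 *		if (err == -EFAULT) {
 *			if (fault_in_user_writeable(uaddr))
 *				return -EFAULT;
 *			goto retry;
 *		}
 */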
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (futex_match(&this->key, key))
			return this;
	}
	return NULL;
}
/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret:	owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
	if (ret != -EBUSY) {
		WARN_ON_ONCE(exiting);
		return;
	}

	if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
		return;

	mutex_lock(&exiting->futex_exit_mutex);
	/*
	 * No point in doing state checking here. If the waiter got here
	 * while the task was in exec()->exec_futex_release() then it can
	 * have any FUTEX_STATE_* value when the waiter has acquired the
	 * mutex. OK, if running, EXITING or DEAD if it reached exit()
	 * already. Highly unlikely and not a problem. Just one more round
	 * through the futex maze.
	 */
	mutex_unlock(&exiting->futex_exit_mutex);

	put_task_struct(exiting);
}
/**
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
void __futex_unqueue(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	futex_hb_waiters_dec(hb);
}
/* The key must be already stored in q->key. */
void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb)
	__acquires(&hb->lock)
{
	/*
	 * Increment the counter before taking the lock so that
	 * a potential waker won't miss a to-be-slept task that is
	 * waiting for the spinlock. This is safe as all futex_q_lock()
	 * users end up calling futex_queue(). Similarly, for housekeeping,
	 * decrement the counter at futex_q_unlock() when some error has
	 * occurred and we don't end up adding the task to the list.
	 */
	futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */

	q->lock_ptr = &hb->lock;
	spin_lock(&hb->lock);
}
	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);
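	/*
	 * Worked example (illustrative): a SCHED_FIFO task with
	 * rt_priority 50 has normal_prio MAX_RT_PRIO-1 - 50 = 49 and is
	 * queued ahead of every SCHED_OTHER task, since all of those get
	 * prio == MAX_RT_PRIO (100). plist_add() orders entries by
	 * ascending prio value, and equal-prio entries keep their
	 * insertion (FIFO) order.
	 */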
/**
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue()
 * must be paired with exactly one earlier call to futex_queue().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
int futex_unqueue(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* RCU so lock_ptr is not going away during locking. */
	guard(rcu)();
	/* In the common case we don't take the spinlock, which is nice. */
retry:
	/*
	 * q->lock_ptr can change between this read and the following
	 * spin_lock. Use READ_ONCE to forbid the compiler from reloading
	 * q->lock_ptr and optimizing lock_ptr out of the logic below.
	 */
	lock_ptr = READ_ONCE(q->lock_ptr);
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock. This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock(). It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock(). It cannot,
		 * however, change back to the original value. Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		__futex_unqueue(q);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	return ret;
}
void futex_q_lockptr_lock(struct futex_q *q)
{
	spinlock_t *lock_ptr;

	/*
	 * See futex_unqueue() why lock_ptr can change.
	 */
	guard(rcu)();
retry:
	lock_ptr = READ_ONCE(q->lock_ptr);
	spin_lock(lock_ptr);
	if (unlikely(lock_ptr != q->lock_ptr)) {
		spin_unlock(lock_ptr);
		goto retry;
	}
}
/*
 * PI futexes can not be requeued and must remove themselves from the hash
 * bucket. The hash bucket lock (i.e. lock_ptr) is held.
 */
void futex_unqueue_pi(struct futex_q *q)
{
	/*
	 * If the lock was not acquired (due to timeout or signal) then the
	 * rt_waiter is removed before futex_q is. If this is observed by
	 * an unlocker after dropping the rtmutex wait lock and before
	 * acquiring the hash bucket lock, then the unlocker dequeues the
	 * futex_q from the hash bucket list to guarantee consistent state
	 * vs. userspace. Therefore the dequeue here must be conditional.
	 */
	if (!plist_node_empty(&q->list))
		__futex_unqueue(q);

	BUG_ON(!q->pi_state);
	put_pi_state(q->pi_state);
	q->pi_state = NULL;
}
/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false

/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
			      bool pi, bool pending_op)
{
	u32 uval, nval, mval;
	pid_t owner;
	int err;

	/* Futex address must be 32bit aligned */
	if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
		return -1;

retry:
	if (get_user(uval, uaddr))
		return -1;
	/*
	 * Special case for regular (non PI) futexes. The unlock path in
	 * user space has two race scenarios:
	 *
	 * 1. The unlock path releases the user space futex value and
	 *    before it can execute the futex() syscall to wake up
	 *    waiters it is killed.
	 *
	 * 2. A woken up waiter is killed before it can acquire the
	 *    futex in user space.
	 *
	 * In the second case, the wake up notification could be generated
	 * by the unlock path in user space after setting the futex value
	 * to zero or by the kernel after setting the OWNER_DIED bit below.
	 *
	 * In both cases the TID validation below prevents a wakeup of
	 * potential waiters which can cause these waiters to block
	 * forever.
	 *
	 * In both cases the following conditions are met:
	 *
	 *	1) task->robust_list->list_op_pending != NULL
	 *	   @pending_op == true
	 *	2) The owner part of user space futex value == 0
	 *	3) Regular futex: @pi == false
	 *
	 * If these conditions are met, it is safe to attempt waking up a
	 * potential waiter without touching the user space futex value and
	 * trying to set the OWNER_DIED bit. If the futex value is zero,
	 * the rest of the user space mutex state is consistent, so a woken
	 * waiter will just take over the uncontended futex. Setting the
	 * OWNER_DIED bit would create inconsistent state and malfunction
	 * of the user space owner died handling. Otherwise, the OWNER_DIED
	 * bit is already set, and the woken waiter is expected to deal with
	 * this.
	 */
	owner = uval & FUTEX_TID_MASK;

	if (pending_op && !pi && !owner) {
		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
			   FUTEX_BITSET_MATCH_ANY);
		return 0;
	}

	/* The futex must be owned by the dying task, otherwise nothing to do. */
	if (owner != task_pid_vnr(curr))
		return 0;
	/*
	 * Ok, this dying thread is truly holding a futex
	 * of interest. Set the OWNER_DIED bit atomically
	 * via cmpxchg, and if the value had FUTEX_WAITERS
	 * set, wake up a waiter (if any). (We have to do a
	 * futex_wake() even if OWNER_DIED is already set -
	 * to handle the rare but possible case of recursive
	 * thread-death.) The rest of the cleanup is done in
	 * userspace.
	 */
	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

	/*
	 * We are not holding a lock here, but we want to have
	 * the pagefault_disable/enable() protection because
	 * we want to handle the fault gracefully. If the
	 * access fails we try to fault in the futex with R/W
	 * verification via get_user_pages. get_user() above
	 * does not guarantee R/W access. If that fails we
	 * give up and leave the futex locked.
	 */
	if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
		switch (err) {
		case -EFAULT:
			if (fault_in_user_writeable(uaddr))
				return -1;
			goto retry;
		case -EAGAIN:
			cond_resched();
			goto retry;

		default:
			WARN_ON_ONCE(1);
			return err;
		}
	}

	if (nval != uval)
		goto retry;
	/*
	 * Wake robust non-PI futexes here. The wakeup of
	 * PI futexes happens in exit_pi_state():
	 */
	if (!pi && (uval & FUTEX_WAITERS)) {
		futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
			   FUTEX_BITSET_MATCH_ANY);
	}

	return 0;
}
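/*
 * Illustrative view of the user-space futex word this operates on
 * (bit values from <linux/futex.h>):
 *
 *	FUTEX_WAITERS		0x80000000
 *	FUTEX_OWNER_DIED	0x40000000
 *	FUTEX_TID_MASK		0x3fffffff
 *
 * Example: a task with TID 1234 dies holding a contended lock, so
 * uval == (1234 | FUTEX_WAITERS). handle_futex_death() rewrites it to
 * mval == (FUTEX_WAITERS | FUTEX_OWNER_DIED) -- the TID is cleared --
 * and wakes one waiter, which then observes OWNER_DIED and recovers
 * the lock in user space.
 */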
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
				     struct robust_list __user * __user *head,
				     unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);
	*pi = uentry & 1;

	return 0;
}
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
	struct robust_list_head __user *head = curr->robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	unsigned long futex_offset;
	int rc;
	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (fetch_robust_entry(&entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
		return;
	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			if (handle_futex_death((void __user *)entry + futex_offset,
					       curr, pi, HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		handle_futex_death((void __user *)pending + futex_offset,
				   curr, pip, HANDLE_DEATH_PENDING);
	}
}
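/*
 * User-space side of the protocol (illustrative sketch, not kernel code):
 * each thread registers one robust_list_head and links every robust mutex
 * it holds into that list. 'my_mutex' is a made-up type for illustration;
 * the kernel only ever sees the list node and the relative futex_offset.
 *
 *	struct my_mutex {
 *		struct robust_list	list;
 *		unsigned int		futex;
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },	// empty: self
 *		.futex_offset	 = offsetof(struct my_mutex, futex) -
 *				   offsetof(struct my_mutex, list),
 *		.list_op_pending = NULL,
 *	};
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * On exit, exit_robust_list() above walks head.list and calls
 * handle_futex_death() on (void *)entry + futex_offset for each entry.
 */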
/*
 * Walk curr->compat_robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int next_pi;
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;
	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
				      &head->list_op_pending, &pip))
		return;
	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
					       (compat_uptr_t __user *)&entry->next,
					       &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi,
					       HANDLE_DEATH_LIST))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}

	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
	}
}
/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	union futex_key key = FUTEX_KEY_INIT;
	/*
	 * The mutex mm_struct::futex_hash_lock might be acquired.
	 */
	might_sleep();

	/*
	 * Ensure the hash remains stable (no resize) during the while loop
	 * below. The hb pointer is acquired under the pi_lock so we can't block
	 * on the mutex.
	 */
	WARN_ON(curr != current);
	guard(private_hash)();
*/
raw_spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) {
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key; if (1) { CLASS(hb, hb)(&key);
			/*
			 * We can race against put_pi_state() removing itself
			 * from the list (a waiter going away). put_pi_state()
			 * will first decrement the reference count and then
			 * modify the list, so it's possible to see the list
			 * entry but fail this reference acquire.
			 *
			 * In that case; drop the locks to let put_pi_state()
			 * make progress and retry the loop.
			 */
			if (!refcount_inc_not_zero(&pi_state->refcount)) {
				raw_spin_unlock_irq(&curr->pi_lock);
				cpu_relax();
				raw_spin_lock_irq(&curr->pi_lock);
				continue;
			}
			raw_spin_unlock_irq(&curr->pi_lock);
			spin_lock(&hb->lock);
			raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
			raw_spin_lock(&curr->pi_lock);
			/*
			 * We dropped the pi-lock, so re-check whether this
			 * task still owns the PI-state:
			 */
			if (head->next != next) {
				/* retain curr->pi_lock for the loop invariant */
				raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
				spin_unlock(&hb->lock);
				put_pi_state(pi_state);
				continue;
			}
	if (unlikely(!list_empty(&tsk->pi_state_list)))
		exit_pi_state_list(tsk);
}
/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task locklessly. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in make_task_dead().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
	/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
	if (tsk->futex_state == FUTEX_STATE_EXITING)
		mutex_unlock(&tsk->futex_exit_mutex);
	tsk->futex_state = FUTEX_STATE_DEAD;
}
static void futex_cleanup_begin(struct task_struct *tsk)
{
	/*
	 * Prevent various race issues against a concurrent incoming waiter
	 * including live locks by forcing the waiter to block on
	 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
	 * attach_to_pi_owner().
	 */
	mutex_lock(&tsk->futex_exit_mutex);

	/*
	 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
	 *
	 * This ensures that all subsequent checks of tsk->futex_state in
	 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
	 * tsk->pi_lock held.
	 *
	 * It guarantees also that a pi_state which was queued right before
	 * the state change under tsk->pi_lock by a concurrent waiter must
	 * be observed in exit_pi_state_list().
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	tsk->futex_state = FUTEX_STATE_EXITING;
	raw_spin_unlock_irq(&tsk->pi_lock);
}
static void futex_cleanup_end(struct task_struct *tsk, int state)
{
	/*
	 * Lockless store. The only side effect is that an observer might
	 * take another loop until it becomes visible.
	 */
	tsk->futex_state = state;
	/*
	 * Drop the exit protection. This unblocks waiters which observed
	 * FUTEX_STATE_EXITING to reevaluate the state.
	 */
	mutex_unlock(&tsk->futex_exit_mutex);
}
void futex_exec_release(struct task_struct *tsk)
{
	/*
	 * The state handling is done for consistency, but in the case of
	 * exec() there is no way to prevent further damage as the PID stays
	 * the same. But for the unlikely and arguably buggy case that a
	 * futex is held on exec(), this provides at least as much state
	 * consistency protection as is possible.
	 */
	futex_cleanup_begin(tsk);
	futex_cleanup(tsk);
	/*
	 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
	 * exec a new binary.
	 */
	futex_cleanup_end(tsk, FUTEX_STATE_OK);
}
/*
 * futex-ref
 *
 * Heavily inspired by percpu-rwsem/percpu-refcount; not reusing any of that
 * code because it just doesn't fit right.
 *
 * Dual counter, per-cpu / atomic approach like percpu-refcount, except it
 * re-initializes the state automatically, such that the fph swizzle is also a
 * transition back to per-cpu.
 */
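/*
 * Sketch of the resulting get-side fast path (assumed shape; the actual
 * helpers live elsewhere in this file): under RCU, a FR_PERCPU reader only
 * bumps a per-cpu counter, otherwise it falls back to the shared atomic,
 * which is biased away from zero during the transition:
 *
 *	guard(rcu)();
 *	if (smp_load_acquire(&fph->state) == FR_PERCPU) {
 *		this_cpu_inc(*mm->futex_ref);
 *		return true;
 *	}
 *	return atomic_long_inc_not_zero(&mm->futex_atomic);
 */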
static void __futex_ref_atomic_begin(struct futex_private_hash *fph)
{
	struct mm_struct *mm = fph->mm;

	/*
	 * The counter we're about to switch to must have fully switched;
	 * otherwise it would be impossible for it to have reported success
	 * from futex_ref_is_dead().
	 */
	WARN_ON_ONCE(atomic_long_read(&mm->futex_atomic) != 0);
	/*
	 * Set the atomic to the bias value such that futex_ref_{get,put}()
	 * will never observe 0. Will be fixed up in __futex_ref_atomic_end()
	 * when folding in the percpu count.
	 */
	atomic_long_set(&mm->futex_atomic, LONG_MAX);
	smp_store_release(&fph->state, FR_ATOMIC);

	call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
}
static void __futex_ref_atomic_end(struct futex_private_hash *fph)
{
	struct mm_struct *mm = fph->mm;
	unsigned int count = 0;
	long ret;
	int cpu;

	/*
	 * Per __futex_ref_atomic_begin() the state of the fph must be ATOMIC
	 * and per this RCU callback, everybody must now observe this state and
	 * use the atomic variable.
	 */
	WARN_ON_ONCE(fph->state != FR_ATOMIC);
	/*
	 * Therefore the per-cpu counter is now stable, sum and reset.
	 */
	for_each_possible_cpu(cpu) {
		unsigned int *ptr = per_cpu_ptr(mm->futex_ref, cpu);
		count += *ptr;
		*ptr = 0;
	}
	/*
	 * Re-init for the next cycle.
	 */
	this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */

	/*
	 * Add actual count, subtract bias and initial refcount.
	 *
	 * The moment this atomic operation happens, futex_ref_is_dead() can
	 * become true.
	 */
	ret = atomic_long_add_return(count - LONG_MAX - 1, &mm->futex_atomic);
	if (!ret)
		wake_up_var(mm);
static void futex_ref_rcu(struct rcu_head *head)
{
	struct mm_struct *mm = container_of(head, struct mm_struct, futex_rcu);
	struct futex_private_hash *fph = rcu_dereference_raw(mm->futex_phash);

	if (fph->state == FR_PERCPU) {
		/*
		 * Per this extra grace-period, everybody must now observe
		 * fph as the current fph and no previously observed fph's
		 * are in-flight.
		 *
		 * Notably, nobody will now rely on the atomic
		 * futex_ref_is_dead() state anymore so we can begin the
		 * migration of the per-cpu counter into the atomic.
		 */
		__futex_ref_atomic_begin(fph);
		return;
	}
	__futex_ref_atomic_end(fph);
}
/*
 * Drop the initial refcount and transition to atomics.
 */
static void futex_ref_drop(struct futex_private_hash *fph)
{
	struct mm_struct *mm = fph->mm;

	/*
	 * Can only transition the current fph.
	 */
	WARN_ON_ONCE(rcu_dereference_raw(mm->futex_phash) != fph);

	/*
	 * We enqueue at least one RCU callback. Ensure mm stays if the task
	 * exits before the transition is completed.
	 */
	mmget(mm);
	/*
	 * In order to avoid the following scenario:
	 *
	 * futex_hash()			__futex_pivot_hash()
	 *   guard(rcu);		  guard(mm->futex_hash_lock);
	 *   fph = mm->futex_phash;
	 *				  rcu_assign_pointer(&mm->futex_phash, new);
	 *				  futex_hash_allocate()
	 *				    futex_ref_drop()
	 *				      fph->state = FR_ATOMIC;
	 *				      atomic_set(, BIAS);
	 *
	 *   futex_private_hash_get(fph); // OOPS
	 *
	 * Where an old fph (which is FR_ATOMIC) and should fail on
	 * inc_not_zero, will succeed because a new transition is started and
	 * the atomic is biased away from 0.
	 *
	 * There must be at least one full grace-period between publishing a
	 * new fph and trying to replace it.
	 */
	if (poll_state_synchronize_rcu(mm->futex_batches)) {
		/*
		 * There was a grace-period, we can begin now.
		 */
		__futex_ref_atomic_begin(fph);
		return;
	}

	call_rcu_hurry(&mm->futex_rcu, futex_ref_rcu);
}
	if (hash_slots && (hash_slots == 1 || !is_power_of_2(hash_slots)))
		return -EINVAL;
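	/*
	 * User space reaches this check via prctl() (illustrative sketch;
	 * PR_FUTEX_HASH and PR_FUTEX_HASH_SET_SLOTS are in <linux/prctl.h>,
	 * trailing arguments assumed zero):
	 *
	 *	prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, 16, 0);
	 *
	 * Valid slot counts are 0 (auto-size) or a power of two greater
	 * than one, so e.g. 1 or 24 fail here with -EINVAL.
	 */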
	/*
	 * Once we've disabled the global hash there is no way back.
	 */
	scoped_guard(rcu) {
		fph = rcu_dereference(mm->futex_phash);
		if (fph && !fph->hash_mask) {
			if (custom)
				return -EBUSY;
			return 0;
		}
	}
	if (!mm->futex_ref) {
		/*
		 * This will always be allocated by the first thread and
		 * therefore requires no locking.
		 */
		mm->futex_ref = alloc_percpu(unsigned int);
		if (!mm->futex_ref)
			return -ENOMEM;
		this_cpu_inc(*mm->futex_ref); /* 0 -> 1 */
	}
	cur = rcu_dereference_protected(mm->futex_phash,
					lockdep_is_held(&mm->futex_hash_lock));
	new = mm->futex_phash_new;
	mm->futex_phash_new = NULL;
	if (fph) {
		if (cur && !cur->hash_mask) {
			/*
			 * If two threads simultaneously request the global
			 * hash then the first one performs the switch,
			 * the second one returns here.
			 */
			free = fph;
			mm->futex_phash_new = new;
			return -EBUSY;
		}
		if (cur && !new) {
			/*
			 * If we have an existing hash, but do not yet have
			 * allocated a replacement hash, drop the initial
			 * reference on the existing hash.
			 */
			futex_ref_drop(cur);
		}
		if (new) {
			/*
			 * Two updates raced; throw out the lesser one.
			 */
			if (futex_hash_less(new, fph)) {
				free = new;
				new = fph;
			} else {
				free = fph;
			}
		} else {
			new = fph;
		}
		fph = NULL;
	}
	if (new) {
		/*
		 * Will set mm->futex_phash_new on failure;
		 * futex_private_hash_get() will try again.
		 */
		if (!__futex_pivot_hash(mm, new) && custom)
			goto again;
	}

	return 0;
}