/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#define FLAGS_SIZE_8		0x0000
#define FLAGS_SIZE_16		0x0001
#define FLAGS_SIZE_32		0x0002
#define FLAGS_SIZE_64		0x0003

#define FLAGS_SIZE_MASK		0x0003

#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x0010
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x0000
#endif
#define FLAGS_CLOCKRT		0x0020
#define FLAGS_HAS_TIMEOUT	0x0040
#define FLAGS_NUMA		0x0080
#define FLAGS_STRICT		0x0100
#define FLAGS_MPOL		0x0200
staticinlinebool futex_flags_valid(unsignedint flags)
{ /* Only 64bit futexes for 64bit code */ if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) { if ((flags & FLAGS_SIZE_MASK) == FLAGS_SIZE_64) returnfalse;
}
/* Only 32bit futexes are implemented -- for now */ if ((flags & FLAGS_SIZE_MASK) != FLAGS_SIZE_32) returnfalse;
/* * Must be able to represent both FUTEX_NO_NODE and every valid nodeid * in a futex word.
*/ if (flags & FLAGS_NUMA) { int bits = 8 * futex_size(flags);
u64 max = ~0ULL;
max >>= 64 - bits; if (nr_node_ids >= max) returnfalse;
}
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	/* Waiter count, maintained by futex_hb_waiters_inc()/_dec() on SMP. */
	atomic_t		waiters;
	spinlock_t		lock;
	struct plist_head	chain;
	struct futex_private_hash *priv;
} ____cacheline_aligned_in_smp;
/* * Priority Inheritance state:
*/ struct futex_pi_state { /* * list of 'owned' pi_state instances - these have to be * cleaned up in do_exit() if the task exits prematurely:
*/ struct list_head list;
/* * The PI object:
*/ struct rt_mutex_base pi_mutex;
/** * struct futex_q - The hashed futex queue entry, one per waiting task * @list: priority-sorted list of tasks waiting on this futex * @task: the task waiting on the futex * @lock_ptr: the hash bucket lock * @wake: the wake handler for this queue * @wake_data: data associated with the wake handler * @key: the key the futex is hashed on * @pi_state: optional priority inheritance state * @rt_waiter: rt_waiter storage for use with requeue_pi * @requeue_pi_key: the requeue_pi target futex key * @bitset: bitset for the optional bitmasked wakeup * @requeue_state: State field for futex_requeue_pi() * @drop_hb_ref: Waiter should drop the extra hash bucket reference if true * @requeue_wait: RCU wait for futex_requeue_pi() (RT only) * * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so * we can wake only the relevant ones (hashed queues may be shared). * * A futex_q has a woken state, just like tasks have TASK_RUNNING. * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. * The order of wakeup is always to make the first condition true, then * the second. * * PI futexes are typically woken before they are removed from the hash list via * the rt_mutex code. See futex_unqueue_pi().
*/ struct futex_q { struct plist_node list;
pagefault_disable();
ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
pagefault_enable();
return ret;
}
/* * This does a plain atomic user space read, and the user pointer has * already been verified earlier by get_futex_key() to be both aligned * and actually in user space, just like futex_atomic_cmpxchg_inatomic(). * * We still want to avoid any speculation, and while __get_user() is * the traditional model for this, it's actually slower than doing * this manually these days. * * We could just have a per-architecture special function for it, * the same way we do futex_atomic_cmpxchg_inatomic(), but rather * than force everybody to do that, write it out long-hand using * the low-level user-access infrastructure. * * This looks a bit overkill, but generally just results in a couple * of instructions.
*/ static __always_inline int futex_get_value(u32 *dest, u32 __user *from)
{
u32 val;
/**
 * futex_queue() - Enqueue the futex_q on the futex_hash_bucket
 * @q:		The futex_q to enqueue
 * @hb:		The destination hash bucket
 * @task:	Task queueing this futex
 *
 * The hb->lock must be held by the caller, and is released here. A call to
 * futex_queue() is typically paired with exactly one call to futex_unqueue().
 * The exceptions involve the PI related operations, which may use
 * futex_unqueue_pi() or nothing if the unqueue is done as part of the wake
 * process and the unqueue state is implicit in the state of woken task (see
 * futex_wait_requeue_pi() for an example).
 *
 * Note that @task may be NULL, for async usage of futexes.
 */
static inline void futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
			       struct task_struct *task)
	__releases(&hb->lock)
{
	__futex_queue(q, hb, task);
	spin_unlock(&hb->lock);
}
/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void futex_hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}
/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void futex_hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}
/*
 * Read the current waiter count of @hb. On !SMP there is no waiter
 * accounting, so conservatively report a pending waiter.
 */
static inline int futex_hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	/*
	 * Full barrier (B), see the ordering comment above.
	 */
	smp_mb();
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}
/**
 * struct futex_vector - Auxiliary struct for futex_waitv()
 * @w: Userspace provided data
 * @q: Kernel side data
 *
 * Struct used to build an array with all data need for futex_waitv()
 */
struct futex_vector {
	struct futex_waitv	w;
	struct futex_q		q;
};
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.