/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
 * the list and any non-exclusive tasks will be woken first. A priority task
 * may be at the head of the list, and can consume the event without any other
 * tasks being woken if it's also an exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_entry_t *curr, *next;

	lockdep_assert_held(&wq_head->lock);

	list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}

	return nr_exclusive;
}

static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	int remaining;

	spin_lock_irqsave(&wq_head->lock, flags);
	remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags,
			key);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return nr_exclusive - remaining;
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state. Returns the number of exclusive
 * tasks that were awakened.
 */
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
	      int nr_exclusive, void *key)
{
	return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
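
/*
 * Illustrative sketch (not part of this file): callers normally reach
 * __wake_up() through the wake_up*() macros, paired with wait_event*()
 * on the sleeping side. The names "example_wq" and "example_cond" below
 * are hypothetical:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(example_wq);
 *	static bool example_cond;
 *
 *	// sleeper
 *	wait_event(example_wq, example_cond);
 *
 *	// waker: set the condition *before* waking
 *	example_cond = true;
 *	wake_up(&example_wq);	// __wake_up(&example_wq, TASK_NORMAL, 1, NULL)
 */
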
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);
/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);
/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);
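
/*
 * Illustrative sketch (not part of this file): a waker that is itself about
 * to block can use the _sync variant so the scheduler does not pointlessly
 * migrate the wakee to another CPU. Names are hypothetical:
 *
 *	// producer, about to block in its own wait loop right after this
 *	example_cond = true;
 *	wake_up_interruptible_sync(&example_wq);
 *	wait_event_interruptible(example_other_wq, example_other_cond);
 */
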
/* For internal use only */
void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));

	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}
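
/*
 * Illustrative sketch (not part of this file): callers are expected to go
 * through the wake_up_pollfree() wrapper in <linux/wait.h>, which only takes
 * the locked slow path when there are waiters, roughly:
 *
 *	static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
 *	{
 *		// Lockless check first; __wake_up_pollfree() does the locked
 *		// wakeup with EPOLLHUP | POLLFREE.
 *		if (waitqueue_active(wq_head))
 *			__wake_up_pollfree(wq_head);
 *	}
 */
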
/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
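
/*
 * Illustrative sketch (not part of this file): the canonical open-coded
 * wait loop built on prepare_to_wait()/finish_wait(). "example_wq" and
 * "example_cond" are hypothetical:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (example_cond)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&example_wq, &wait);
 *
 * Because prepare_to_wait() sets the task state only after queueing the
 * entry (see the note above), a wakeup racing with this loop is not lost:
 * either the waker sees the queued entry and sets us TASK_RUNNING, or we
 * re-check the condition before really sleeping.
 */
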
/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
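
/*
 * Illustrative sketch (not part of this file): prepare_to_wait_event() is
 * the workhorse of the wait_event*() macros. Roughly (and simplified), the
 * ___wait_event() expansion behaves like the loop below; "example_wq" and
 * "example_cond" are hypothetical:
 *
 *	struct wait_queue_entry wq_entry;
 *	long ret;
 *
 *	init_wait_entry(&wq_entry, 0);
 *	for (;;) {
 *		ret = prepare_to_wait_event(&example_wq, &wq_entry,
 *					    TASK_INTERRUPTIBLE);
 *		if (example_cond)
 *			break;
 *		if (ret)	// -ERESTARTSYS: a signal arrived
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&example_wq, &wq_entry);
 */
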
/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
}
}
EXPORT_SYMBOL(finish_wait);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
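
/*
 * Illustrative sketch (not part of this file): DEFINE_WAIT() selects
 * autoremove_wake_function as the wake callback, so a successful wakeup
 * also dequeues the entry on the waker's side:
 *
 *	DEFINE_WAIT(wait);	// ->func == autoremove_wake_function
 *
 *	prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
 *	if (!example_cond)
 *		schedule();
 *	finish_wait(&example_wq, &wait);	// usually already dequeued,
 *						// hits the lockless fast path
 */
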
/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !kthread_should_stop_or_park())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);