/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	int remaining = atomic_sub_return_release(v, &cl->remaining);

	closure_put_after_sub(cl, remaining);
}
EXPORT_SYMBOL(closure_sub);
/* * closure_wake_up - wake up all closures on a wait list, without memory barrier
*/ void __closure_wake_up(struct closure_waitlist *wait_list)
{ struct llist_node *list; struct closure *cl, *t; struct llist_node *reverse = NULL;
list = llist_del_all(&wait_list->list);
/* We first reverse the list to preserve FIFO ordering and fairness */
reverse = llist_reverse_order(list);
/* Then do the wakeups */
llist_for_each_entry_safe(cl, t, reverse, list) {
closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
EXPORT_SYMBOL(__closure_wake_up);
/** * closure_wait - add a closure to a waitlist * @waitlist: will own a ref on @cl, which will be released when * closure_wake_up() is called on @waitlist. * @cl: closure pointer. *
*/ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{ if (atomic_read(&cl->remaining) & CLOSURE_WAITING) returnfalse;
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE); if (s.done) break; if (!timeout) { /* * Carefully undo the continue_at() - but only if it * hasn't completed, i.e. the final closure_put() hasn't * happened yet:
*/ unsigned old, new, v = atomic_read(&cl->remaining); do {
old = v; if (!old || (old & CLOSURE_RUNNING)) goto success;
new = old + CLOSURE_REMAINING_INITIALIZER;
} while ((v = atomic_cmpxchg(&cl->remaining, old, new)) != old);
ret = -ETIME;
}