/* * Mark us canceled regardless of ownership. This will prevent a * potential retry from a spurious wakeup.
*/
atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);
/* claim ownership */ if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK) returnfalse;
if (!atomic_sub_return(1, &iw->refs)) returnfalse;
/* * Wakeup triggered, racing with us. It was prevented from * completing because of that, queue up the tw to do that.
*/
req->io_task_work.func = io_waitid_cb;
io_req_task_work_add(req);
remove_wait_queue(iw->head, &iwa->wo.child_wait); returntrue;
}
/* * If we get -ERESTARTSYS here, we need to re-arm and check again * to ensure we get another callback. If the retry works, then we can * just remove ourselves from the waitqueue again and finish the * request.
*/ if (unlikely(ret == -ERESTARTSYS)) { struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
/* Don't retry if cancel found it meanwhile */
ret = -ECANCELED; if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
iw->head = ¤t->signal->wait_chldexit;
add_wait_queue(iw->head, &iwa->wo.child_wait);
ret = __do_wait(&iwa->wo); if (ret == -ERESTARTSYS) { /* retry armed, drop our ref */
io_waitid_drop_issue_ref(req); return;
}
ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
iw->options, NULL); if (ret) goto done;
/* * Mark the request as busy upfront, in case we're racing with the * wakeup. If we are, then we'll notice when we drop this initial * reference again after arming.
*/
atomic_set(&iw->refs, 1);
/* * Cancel must hold the ctx lock, so there's no risk of cancelation * finding us until a) we remain on the list, and b) the lock is * dropped. We only need to worry about racing with the wakeup * callback.
*/
io_ring_submit_lock(ctx, issue_flags);
hlist_add_head(&req->hash_node, &ctx->waitid_list);
ret = __do_wait(&iwa->wo); if (ret == -ERESTARTSYS) { /* * Nobody else grabbed a reference, it'll complete when we get * a waitqueue callback, or if someone cancels it.
*/ if (!io_waitid_drop_issue_ref(req)) {
io_ring_submit_unlock(ctx, issue_flags); return IOU_ISSUE_SKIP_COMPLETE;
}
/* * Wakeup triggered, racing with us. It was prevented from * completing because of that, queue up the tw to do that.
*/
io_ring_submit_unlock(ctx, issue_flags); return IOU_ISSUE_SKIP_COMPLETE;
}
hlist_del_init(&req->hash_node);
remove_wait_queue(iw->head, &iwa->wo.child_wait);
ret = io_waitid_finish(req, ret);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.