/* * The request has more work to do and should be retried. io_uring will * attempt to wait on the file for eligible opcodes, but otherwise * it'll be handed to iowq for blocking execution. It works for normal * requests as well as for the multi shot mode.
*/
IOU_RETRY = -EAGAIN,
/* * Requeue the task_work to restart operations on this request. The * actual value isn't important, should just be not an otherwise * valid error code, yet less than -MAX_ERRNO and valid internally.
*/
IOU_REQUEUE = -3072,
};
/* * Wake up if we have enough events, or if a timeout occurred since we * started waiting. For timeouts, we always want to return to userspace, * regardless of event count.
*/ return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
lockdep_assert_held(&ctx->uring_lock);
if (ctx->flags & IORING_SETUP_IOPOLL) {
lockdep_assert_held(&ctx->uring_lock);
} elseif (!ctx->task_complete) {
lockdep_assert_held(&ctx->completion_lock);
} elseif (ctx->submitter_task) { /* * ->submitter_task may be NULL and we can still post a CQE, * if the ring has been setup with IORING_SETUP_R_DISABLED. * Not from an SQE, as those cannot be submitted, but via * updating tagged resources.
*/ if (!percpu_ref_is_dying(&ctx->refs))
lockdep_assert(current == ctx->submitter_task);
} #endif
}
/* * If we can't get a cq entry, userspace overflowed the * submission (by quite a lot). Increment the overflow count in * the ring.
*/ if (unlikely(!io_get_cqe(ctx, &cqe))) returnfalse;
static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
		mutex_lock(&ctx->uring_lock);
	/* Either way, the lock must be held on return. */
	lockdep_assert_held(&ctx->uring_lock);
}
/* Publish posted CQEs to userspace by advancing the shared CQ tail. */
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
static inline void __io_wq_wake(struct wait_queue_head *wq)
{
	/*
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(wq))
		__wake_up(wq, TASK_NORMAL, 0,
			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}
staticinlinevoid io_cqring_wake(struct io_ring_ctx *ctx)
{ /* * Trigger waitqueue handler on all waiters on our waitqueue. This * won't necessarily wake up all the tasks, io_should_wake() will make * that decision.
*/
/* * SQPOLL must use the actual sqring head, as using the cached_sq_head * is race prone if the SQPOLL thread has grabbed entries but not yet * committed them to the ring. For !SQPOLL, this doesn't matter, but * since this helper is just used for SQPOLL sqring waits (or POLLOUT), * just read the actual sqring head unconditionally.
*/ return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
}
/* make sure SQ entry isn't read before tail */
entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; return min(entries, ctx->sq_entries);
}
/*
 * Run any pending task_work for the current task. Returns true (as int)
 * if any work was actually executed, false otherwise.
 */
static inline int io_run_task_work(void)
{
	bool ret = false;

	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER) {
		if (test_thread_flag(TIF_NOTIFY_RESUME)) {
			__set_current_state(TASK_RUNNING);
			resume_user_mode_work(NULL);
		}
		if (current->io_uring) {
			unsigned int count = 0;

			__set_current_state(TASK_RUNNING);
			tctx_task_work_run(current->io_uring, UINT_MAX, &count);
			if (count)
				ret = true;
		}
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		ret = true;
	}

	/*
	 * NOTE(review): the extracted original was truncated here; the
	 * trailing return is restored from the fact that 'ret' is the only
	 * result this function computes.
	 */
	return ret;
}
/* * Don't complete immediately but use deferred completion infrastructure. * Protected by ->uring_lock and can only be used either with * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
*/ staticinlinevoid io_req_complete_defer(struct io_kiocb *req)
__must_hold(&req->ctx->uring_lock)
{ struct io_submit_state *state = &req->ctx->submit_state;
/* * Terminate the request if either of these conditions are true: * * 1) It's being executed by the original task, but that task is marked * with PF_EXITING as it's exiting. * 2) PF_KTHREAD is set, in which case the invoker of the task_work is * our fallback task_work.
*/ staticinlinebool io_should_terminate_tw(struct io_ring_ctx *ctx)
{ return (current->flags & (PF_KTHREAD | PF_EXITING)) || percpu_ref_is_dying(&ctx->refs);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.