	/*
	 * Do the dance but not conditional clear_bit() because it'd race with
	 * other threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
mutex_unlock(&sqd->lock);
wake_up(&sqd->wait);
}
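/*
 * For context, a minimal sketch of the parking side that the dance above
 * pairs with, reconstructed from the comment (parkers bump park_pending and
 * set the SHOULD_PARK bit before taking the lock); the real
 * io_sq_thread_park() may differ in detail:
 *
 *	void io_sq_thread_park(struct io_sq_data *sqd)
 *	{
 *		struct task_struct *tsk;
 *
 *		atomic_inc(&sqd->park_pending);
 *		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
 *		mutex_lock(&sqd->lock);
 *		tsk = sqpoll_task_locked(sqd);
 *		if (tsk)
 *			wake_up_process(tsk);
 *	}
 */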
static int __io_sq_thread(struct io_ring_ctx *ctx, struct io_sq_data *sqd,
			  bool cap_entries, struct io_sq_time *ist)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (to_submit || !wq_list_empty(&ctx->iopoll_list)) {
		const struct cred *creds = NULL;
io_sq_start_worktime(ist);
if (ctx->sq_creds != current_cred())
creds = override_creds(ctx->sq_creds);
		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
io_do_iopoll(ctx, true);
		/*
		 * Don't submit if refs are dying, good for io_uring_register(),
		 * but also it is relied upon by io_ring_exit_work()
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
!(ctx->flags & IORING_SETUP_R_DISABLED))
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
revert_creds(creds);
	}

	return ret;
}
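/*
 * Illustrative userspace sketch (not part of this file), using liburing: the
 * fairness cap in __io_sq_thread() above only matters when several rings
 * share one SQPOLL thread, which can be requested with
 * IORING_SETUP_ATTACH_WQ. Queue depth and idle time below are arbitrary
 * example values.
 */
#include <liburing.h>

int setup_shared_sqpoll(struct io_uring *a, struct io_uring *b)
{
	struct io_uring_params pa = { 0 }, pb = { 0 };
	int ret;

	pa.flags = IORING_SETUP_SQPOLL;
	pa.sq_thread_idle = 2000;	/* SQ thread idles after 2000 ms without work */
	ret = io_uring_queue_init_params(64, a, &pa);
	if (ret < 0)
		return ret;

	/* The second ring attaches to the first ring's SQ poll thread. */
	pb.flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
	pb.wq_fd = a->ring_fd;
	ret = io_uring_queue_init_params(64, b, &pb);
	if (ret < 0)
		io_uring_queue_exit(a);
	return ret;
}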
/*
 * Run task_work, processing the retry_list first. The retry_list holds
 * entries that we passed on in the previous run, if we had more task_work
 * than we were asked to process. Newly queued task_work isn't run until the
 * retry list has been fully processed.
 */
static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
{
	struct io_uring_task *tctx = current->io_uring;
	unsigned int count = 0;

if (*retry_list) {
		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
		if (count >= max_entries)
			goto out;
max_entries -= count;
}
*retry_list = tctx_task_work_run(tctx, max_entries, &count);
out:
	if (task_work_pending(current))
		task_work_run();
	return count;
}
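/*
 * Illustrative userspace toy model (not part of this file) of the
 * budget-plus-carry-over pattern described above: leftovers from the last
 * run are drained before any newly queued items, and whatever exceeds the
 * budget is handed back as the next run's carry-over. All names here are
 * invented for the illustration.
 */
struct toy_item {
	struct toy_item *next;
};

struct toy_item *toy_run_budgeted(struct toy_item *retry, struct toy_item *fresh,
				  int budget, unsigned int *count)
{
	/* Old carry-over first. */
	while (retry && budget > 0) {
		retry = retry->next;
		(*count)++;
		budget--;
	}
	if (retry)
		return retry;	/* budget exhausted, keep the rest for later */

	/* Then newly queued items, within what is left of the budget. */
	while (fresh && budget > 0) {
		fresh = fresh->next;
		(*count)++;
		budget--;
	}
	return fresh;		/* leftover new items become the carry-over */
}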
	/*
	 * Force audit context to get setup, in case we do prep side async
	 * operations that would trigger an audit call before any issue side
	 * audit has been done.
	 */
audit_uring_entry(IORING_OP_NOP);
audit_uring_exit(true, 0);
	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;
		struct io_sq_time ist = { };

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
timeout = jiffies + sqd->sq_thread_idle;
}
cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, sqd, cap_entries, &ist);

if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
sqt_spin = true;
		}
		if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE))
sqt_spin = true;
get_task_struct(tsk);
ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}
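/*
 * Illustrative userspace sketch (not part of this file): how the flags
 * checked above are requested. IORING_SETUP_SQ_AFF is only meaningful
 * together with IORING_SETUP_SQPOLL, which is exactly what the -EINVAL
 * branch above enforces. CPU number and idle time are arbitrary examples.
 */
#include <liburing.h>

int setup_pinned_sqpoll(struct io_uring *ring)
{
	struct io_uring_params p = { 0 };

	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
	p.sq_thread_cpu = 3;		/* pin the SQ poll thread to CPU 3 */
	p.sq_thread_idle = 1000;	/* thread idles after 1000 ms without work */
	return io_uring_queue_init_params(8, ring, &p);
}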
__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		struct task_struct *tsk;

		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		tsk = sqpoll_task_locked(sqd);
		if (tsk)
ret = io_wq_cpu_affinity(tsk->io_uring, mask);
io_sq_thread_unpark(sqd);
}
return ret;
}
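/*
 * Illustrative userspace sketch (not part of this file): on a ring created
 * with IORING_SETUP_SQPOLL, an io-wq affinity update like the one handled
 * above is requested via io_uring_register(2) with IORING_REGISTER_IOWQ_AFF,
 * wrapped by io_uring_register_iowq_aff() in liburing. The choice of CPU 0
 * is an arbitrary example.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <liburing.h>

int restrict_iowq_to_cpu0(struct io_uring *ring)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	return io_uring_register_iowq_aff(ring, sizeof(mask), &mask);
}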