if (unlikely(!mlx5e_xdp_is_active(priv))) return -ENETDOWN;
if (unlikely(qid >= params->num_channels)) return -EINVAL;
c = priv->channels.c[qid];
if (!napi_if_scheduled_mark_missed(&c->napi)) { /* To avoid WQE overrun, don't post a NOP if async_icosq is not * active and not polled by NAPI. Return 0, because the upcoming * activate will trigger the IRQ for us.
*/ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->async_icosq.state))) return 0;
if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state)) return 0;
mlx5e_trigger_napi_icosq(c);
}
return 0;
}
/* When TX fails (because of the size of the packet), we need to get completions * in order, so post a NOP to get a CQE. Since AF_XDP doesn't distinguish * between successful TX and errors, handling in mlx5e_poll_xdpsq_cq is the * same.
*/ staticvoid mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq, union mlx5e_xdp_info *xdpi)
{
u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi]; struct mlx5e_tx_wqe *nopwqe;
if (unlikely(check_result < 0)) {
work_done = false; break;
}
if (!xsk_tx_peek_desc(pool, &desc)) { /* TX will get stuck until something wakes it up by * triggering NAPI. Currently it's expected that the * application calls sendto() if there are consumed, but * not completed frames.
*/ break;
}
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd,
check_result, meta); if (unlikely(!ret)) { if (sq->mpwqe.wqe)
mlx5e_xdp_mpwqe_complete(sq);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.