/**
 * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
 * @rx_ring: Current rx ring
 * @pool_present: is pool for XSK present
 *
 * Try allocating memory and return ENOMEM, if failed to allocate.
 * If allocation was successful, substitute buffer with allocated one.
 * Returns 0 on success, negative on failure
 */
static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
					  sizeof(*rx_ring->rx_bi);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		/* Switch to the zero-copy (XSK) SW ring; drop the normal one. */
		kfree(rx_ring->rx_bi);
		rx_ring->rx_bi = NULL;
		rx_ring->rx_bi_zc = sw_ring;
	} else {
		/* Switch back to the normal SW ring; drop the XSK one. */
		kfree(rx_ring->rx_bi_zc);
		rx_ring->rx_bi_zc = NULL;
		rx_ring->rx_bi = sw_ring;
	}
	return 0;
}
/**
 * i40e_realloc_rx_bi_zc - reallocate rx SW rings
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffer for rx_rings that might be used by XSK.
 * XDP requires more memory, than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
{
	struct i40e_ring *rx_ring;
	unsigned long q;

	/* NOTE(review): the loop body was lost in extraction; rebuilt to
	 * walk the VSI's zero-copy qualified queues and reallocate each
	 * ring's SW buffer array — confirm against the original file.
	 */
	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
		rx_ring = vsi->rx_rings[q];
		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
			return -ENOMEM;
	}
	return 0;
}
/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	/* AF_XDP pools are only supported on the main VSI. */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	/* Fix for the original extract reading if_running uninitialized:
	 * the queue only needs a restart when the interface is actually
	 * up with XDP enabled.
	 */
	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}
/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	/* Fix for the original extract reading if_running uninitialized. */
	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		/* Quiesce the queue pair before tearing the pool down. */
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	/* In the original extract 'pool' was looked up but never used —
	 * the DMA unmap (counterpart of xsk_pool_dma_map() in the enable
	 * path) appears to have been lost; restored here.
	 */
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}
/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate buffer pool (from)to
 *
 * This function enables or disables a buffer pool to a certain ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	/* A non-NULL pool selects the enable path, NULL the disable path. */
	if (pool)
		return i40e_xsk_pool_enable(vsi, pool, qid);

	return i40e_xsk_pool_disable(vsi, qid);
}
/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
			   struct bpf_prog *xdp_prog)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	/* XDP_REDIRECT is handled first as it is the common AF_XDP verdict. */
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return I40E_XDP_REDIR;
		/* -ENOBUFS with need_wakeup means the XSK Rx queue is full:
		 * exit the NAPI loop so user space can drain it.
		 */
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = I40E_XDP_EXIT;
		else
			result = I40E_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		if (result == I40E_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = I40E_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}
/* NOTE(review): this span is a function-body fragment — the enclosing
 * function's signature (an XDP-verdict handler taking rx_ring, xdp_buff
 * and rx_packets/rx_bytes out-parameters plus a *failure flag, judging
 * by the names used below) was lost in extraction.  Code is left
 * byte-identical; only comments are added.
 */
/* Frames sent to the XDP Tx ring or redirected elsewhere need no
 * further handling here.
 */
if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX) return;
/* I40E_XDP_EXIT: signal the caller to stop this NAPI poll cycle. */
if (xdp_res == I40E_XDP_EXIT) {
*failure = true; return;
}
/* Dropped frames hand their buffer straight back to the XSK pool. */
if (xdp_res == I40E_XDP_CONSUMED) {
xsk_buff_free(xdp_buff); return;
} if (xdp_res == I40E_XDP_PASS) { /* NB! We are not checking for errors using * i40e_test_staterr with * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that * SBP is *not* set in PRT_SBPVSI (default not set).
*/
/* Build an skb for the network stack; on allocation failure report
 * zero packets/bytes for this frame.
 */
skb = i40e_construct_skb_zc(rx_ring, xdp_buff); if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
*rx_packets = 0;
*rx_bytes = 0; return;
}
/* NOTE(review): the code that actually delivers 'skb' to the stack
 * (and closes the XDP_PASS branch) appears to be missing between here
 * and the WARN below — likely dropped during extraction; confirm
 * against the original file.
 */
/* Should never get here, as all valid cases have been handled already.
*/
WARN_ON_ONCE(1);
}
/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 *
 * NOTE(review): this extract is truncated and token-mangled
 * ("unsignedint" etc.).  Code below is left byte-identical; only the
 * comments were reworked.
 **/ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{ unsignedint total_rx_bytes = 0, total_rx_packets = 0;
u16 next_to_process = rx_ring->next_to_process;
u16 next_to_clean = rx_ring->next_to_clean; unsignedint xdp_res, xdp_xmit = 0; struct xdp_buff *first = NULL;
u32 count = rx_ring->count; struct bpf_prog *xdp_prog;
u32 entries_to_alloc; bool failure = false;
/* Resume a partially-processed multi-buffer frame, if any. */
if (next_to_process != next_to_clean)
first = *i40e_rx_bi(rx_ring, next_to_clean);
/* NB! xdp_prog will always be !NULL, due to the fact that * this path is enabled by setting an XDP program.
*/
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
while (likely(total_rx_packets < (unsignedint)budget)) { union i40e_rx_desc *rx_desc; unsignedint rx_packets; unsignedint rx_bytes; struct xdp_buff *bi; unsignedint size;
u64 qword;
/* NOTE(review): 'rx_desc' and 'qword' are read below but never
 * assigned in this extract — the descriptor fetch (and its
 * done-bit check) appears to have been dropped during extraction;
 * confirm against the original file.
 */
/* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we have * verified the descriptor has been written back.
*/
dma_rmb();
/* Programming-status descriptors carry no packet data; recycle
 * the buffer and move on.
 */
if (i40e_rx_is_programming_status(qword)) {
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_free(bi); if (++next_to_process == count)
next_to_process = 0; continue;
}
size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword); if (!size) break;
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi);
/* First buffer of a frame, or an additional fragment chained
 * onto it; freeing 'first' on chaining failure drops the whole
 * frame.
 */
if (!first)
first = bi; elseif (!xsk_buff_add_frag(first, bi)) {
xsk_buff_free(first); break;
}
if (++next_to_process == count)
next_to_process = 0;
/* NOTE(review): the remainder of this loop (XDP program run, stats
 * accounting) and the function tail (re-allocation, wakeup handling,
 * return value) are missing from this extract.
 */
/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_queue_pairs)
		return -EINVAL;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -EINVAL;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	/* Restored: the extract was missing the documented success return. */
	return 0;
}
/* NOTE(review): fragment — the enclosing function's head (an XDP/XSK Tx
 * ring cleanup, judging by the use of ntc/ntu, tx_bi, bp and xsk_frames
 * declared there) was lost in extraction.  Code left byte-identical;
 * comments only.
 */
/* Walk all outstanding entries between next-to-clean and next-to-use. */
while (ntc != ntu) {
tx_bi = &tx_ring->tx_bi[ntc];
/* Entries with an xdpf were placed via ndo_xdp_xmit and need their
 * own cleanup; the rest are XSK-pool descriptors, only counted so
 * they can be reported back to the pool below.
 */
if (tx_bi->xdpf)
i40e_clean_xdp_tx_buffer(tx_ring, tx_bi); else
xsk_frames++;
tx_bi->xdpf = NULL;
ntc++; if (ntc >= tx_ring->count)
ntc = 0;
}
/* Report completed descriptors back to the AF_XDP buffer pool. */
if (xsk_frames)
xsk_tx_completed(bp, xsk_frames);
}
/** * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP * buffer pool attached * @vsi: vsi * * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
**/ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{ struct net_device *netdev = vsi->netdev; int i;
for (i = 0; i < vsi->num_queue_pairs; i++) { if (xsk_get_pool_from_qid(netdev, i)) returntrue;
}
returnfalse;
}
Messung V0.5
- Dauer der Verarbeitung: 0,27 Sekunden (vorverarbeitet)

Die Informationen auf dieser Webseite wurden nach bestem Wissen
sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch
Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.

Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
experimentell.