	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}
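/*
 * For context: the fragment above is the tail of the XSK pool enable path.
 * Below is a hedged sketch of how the earlier, not-shown part of such a
 * routine typically begins. The queue-id bounds check and the
 * xsk_pool_dma_map() call are assumptions inferred from the matching
 * xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR) in the error path above;
 * this is illustrative, not a verbatim copy of the driver.
 */
static int ixgbe_xsk_pool_enable_sketch(struct ixgbe_adapter *adapter,
					struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	/* Map the pool's UMEM for DMA before touching the rings; the error
	 * path in the fragment above undoes exactly this mapping.
	 */
	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	/* ... from here the code continues as in the fragment above:
	 * disable the ring if running, set the zero-copy bit, re-enable
	 * the ring and kick NAPI via ixgbe_xsk_wakeup().
	 */
	return 0;
}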
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return IXGBE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = IXGBE_XDP_EXIT;
		else
			result = IXGBE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		ring = ixgbe_determine_xdp_ring(adapter);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_lock(&ring->tx_lock);
		result = ixgbe_xmit_xdp_ring(ring, xdpf);
		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
			spin_unlock(&ring->tx_lock);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = IXGBE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}

	return result;
}
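/*
 * A hedged sketch of how a NAPI receive loop typically consumes the
 * verdicts returned above: IXGBE_XDP_REDIR results are accumulated so a
 * single xdp_do_flush() can be issued at the end of the poll, while
 * IXGBE_XDP_EXIT (need_wakeup socket returned -ENOBUFS) ends the loop
 * early. Function and variable names here are illustrative assumptions,
 * not the driver's verbatim receive path.
 */
static bool example_handle_zc_verdict(unsigned int xdp_res,
				      unsigned int *xdp_xmit)
{
	if (xdp_res == IXGBE_XDP_EXIT)
		return false;	/* stop polling; userspace will wake us up */

	/* Remember that frames were redirected so they are flushed once,
	 * after the loop, rather than per packet.
	 */
	*xdp_xmit |= xdp_res & IXGBE_XDP_REDIR;
	return true;		/* keep processing descriptors */
}

/* After the loop, the accumulated flag would typically drive:
 *	if (xdp_xmit & IXGBE_XDP_REDIR)
 *		xdp_do_flush();
 */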
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);
	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}
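/*
 * Note on the allocation loop above: 'i' is biased downward by
 * rx_ring->count before the loop, so reaching zero means the walk has
 * wrapped past the last descriptor. That lets the hot path test
 * 'if (unlikely(!i))' instead of comparing against the ring size, and the
 * final 'i += rx_ring->count' converts the biased value back into the
 * real next_to_use index. A minimal illustration of the same idiom, with
 * hypothetical names (not driver code):
 */
static inline u16 biased_ring_advance(u16 biased_idx, u16 ring_size)
{
	biased_idx++;			 /* move to the next descriptor     */
	if (unlikely(!biased_idx))	 /* wrapped past the last slot?     */
		biased_idx -= ring_size; /* re-bias: value now means slot 0 */
	return biased_idx;
}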
	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;