/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp:		NFP Net data path struct
 * @dma_addr:	Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	/* NOTE(review): the allocation step below was missing and 'frag' was
	 * read uninitialized (undefined behavior); restored per the upstream
	 * driver -- confirm against git history.
	 *
	 * XDP needs full, page-aligned buffers; the non-XDP path can use the
	 * cheaper page-frag allocator.
	 */
	if (!dp->xdp_prog) {
		frag = netdev_alloc_frag(dp->fl_bufsz);
	} else {
		struct page *page;

		page = alloc_page(GFP_KERNEL);
		frag = page ? page_address(page) : NULL;
	}
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		/* Mapping failed -- release the frag we just allocated */
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}
/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:	TX ring structure
 * @dp:		NFP Net data path struct
 * @r_vec:	IRQ vector servicing this ring
 * @idx:	Ring index
 * @is_xdp:	Is this an XDP TX ring?
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
		     struct nfp_net_r_vector *r_vec, unsigned int idx,
		     bool is_xdp)
{
	struct nfp_net *nn = r_vec->nfp_net;

	/* NOTE(review): body below restored from the upstream driver -- the
	 * original text was truncated after the local declaration; confirm
	 * against git history.
	 */
	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;
	tx_ring->is_xdp = is_xdp;
	u64_stats_init(&tx_ring->r_vec->tx_sync);

	/* Map this ring onto its queue controller and optional write-back
	 * slot.
	 */
	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}
/** * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring * @rx_ring: RX ring structure * @r_vec: IRQ vector servicing this ring * @idx: Ring index
*/ staticvoid
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, struct nfp_net_r_vector *r_vec, unsignedint idx)
{ struct nfp_net *nn = r_vec->nfp_net;
/**
 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
 * @rx_ring:	RX ring structure
 *
 * Assumes that the device is stopped, must be idempotent.
 */
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* wr_p == rd_p means ring was never fed FL bufs.  RX rings are always
	 * kept at cnt - 1 FL bufs.
	 */
	if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
		return;

	/* Move the empty entry to the end of the list */
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	if (rx_ring->r_vec->xsk_pool) {
		rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
		memset(&rx_ring->xsk_rxbufs[last_idx], 0,
		       sizeof(*rx_ring->xsk_rxbufs));
	} else {
		rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
		memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
	}

	/* NOTE(review): tail restored from the upstream driver -- the original
	 * text was truncated before the descriptor wipe and pointer reset;
	 * confirm against git history.
	 */
	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
}
/** * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring * @dp: NFP Net data path struct * @rx_ring: RX ring to remove buffers from * * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) * entries. After device is disabled nfp_net_rx_ring_reset() must be called * to restore required ring geometry.
*/ staticvoid
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{ unsignedint i;
if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) return;
for (i = 0; i < rx_ring->cnt - 1; i++) { /* NULL skb can only happen when initial filling of the ring * fails to allocate enough buffers and calls here to free * already allocated ones.
*/ if (!rx_ring->rxbufs[i].frag) continue;
/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to remove buffers from
 *
 * Return: 0 on success, -ENOMEM if an allocation fails (any buffers
 * allocated so far are released before returning).
 */
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	/* XSK pool supplies its own buffers, nothing to do here */
	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return 0;

	/* RX rings are always kept at cnt - 1 FL buffers */
	for (i = 0; i < rx_ring->cnt - 1; i++) {
		struct nfp_net_rx_buf *buf = &rx_ring->rxbufs[i];

		buf->frag = nfp_net_rx_alloc_one(dp, &buf->dma_addr);
		if (!buf->frag) {
			/* Unwind the partial fill before bailing out */
			nfp_net_rx_ring_bufs_free(dp, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{ unsignedint r;
dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
GFP_KERNEL); if (!dp->tx_rings) return -ENOMEM;
if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
dp->txrwb = dma_alloc_coherent(dp->dev,
dp->num_tx_rings * sizeof(u64),
&dp->txrwb_dma, GFP_KERNEL); if (!dp->txrwb) goto err_free_rings;
}
for (r = 0; r < dp->num_tx_rings; r++) { int bias = 0;
if (r >= dp->num_stack_tx_rings)
bias = dp->num_stack_tx_rings;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.