/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf;

	pf = vsi->back;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;
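	/* Hedged sketch of the elided remainder: zero the per-queue Rx/Tx
	 * (and XDP) ring statistics. The exact stats member names are
	 * assumptions, not taken from this excerpt.
	 */
	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
	if (vsi->xdp_rings)
		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}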
/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (vsi->xdp_rings)
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}
/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so handle
	 * only QINT_RQCTL here
	 */
reg = rx_ring->reg_idx;
val = rd32(hw, QINT_RQCTL(reg));
val &= ~QINT_RQCTL_CAUSE_ENA_M;
wr32(hw, QINT_RQCTL(reg), val);
if (q_vector) {
wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
ice_flush(hw);
synchronize_irq(q_vector->irq.virq);
}
}
/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 * @qid: queue index
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int q, _qid = qid;
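	/* Hedged sketch of the elided remainder: program the interrupt cause
	 * registers for every Tx and Rx ring handled by this vector, then
	 * flush the writes. Helper names follow the driver's conventions but
	 * are assumed, not taken from this excerpt.
	 */
	for (q = 0; q < q_vector->num_ring_tx; q++) {
		ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx);
		_qid++;
	}

	_qid = qid;

	for (q = 0; q < q_vector->num_ring_rx; q++) {
		ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx);
		_qid++;
	}

	ice_flush(hw);
}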
/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_q_vector *q_vector;
	int fail = 0;
	bool link_up;
	int err;

	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
	if (!fail)
		fail = err;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
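		/* Hedged sketch of the elided middle of this function:
		 * configure the XDP Tx queue, then the Rx queue and its
		 * queue vector, and re-enable NAPI and the interrupt.
		 * Helper names are assumptions based on the driver's
		 * conventions, not taken from this excerpt.
		 */
		err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
		if (!fail)
			fail = err;
		ice_set_ring_xdp(xdp_ring);
		ice_tx_xsk_pool(vsi, q_idx);
	}

	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
	if (!fail)
		fail = err;

	q_vector = vsi->rx_rings[q_idx]->q_vector;
	ice_qvec_cfg_msix(vsi, q_vector, q_idx);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (!fail)
		fail = err;

	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);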
/* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
synchronize_net();
	ice_get_link_status(vsi->port_info, &link_up);
	if (link_up) {
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
netif_carrier_on(vsi->netdev);
}
return fail;
}
/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
}
/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

return 0;
}
/**
 * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
 * @rx_ring: Rx ring
 * @pool_present: is pool for XSK present
 *
 * Try allocating memory and return -ENOMEM if the allocation fails.
 * If allocation was successful, substitute buffer with allocated one.
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
					  sizeof(*rx_ring->rx_buf);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
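	/* Hedged sketch of the elided remainder: swap the software ring
	 * between the xdp_buf and rx_buf layouts, assuming the two fields
	 * referenced above are the only variants.
	 */
	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
		rx_ring->xdp_buf = sw_ring;
	} else {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
		rx_ring->rx_buf = sw_ring;
	}

	return 0;
}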
/**
 * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffer for rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 *
 * Returns 0 on success, negative on failure
 */
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
	struct ice_rx_ring *rx_ring;
	uint i;

	ice_for_each_rxq(vsi, i) {
		rx_ring = vsi->rx_rings[i];
		if (!rx_ring->xsk_pool)
			continue;

		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
			return -ENOMEM;
}
return 0;
}
/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
		pool_failure = -EINVAL;
		goto failure;
	}
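	/* The assignment of if_running is not part of this excerpt; a hedged
	 * reconstruction is that the queue pair is only torn down and
	 * re-enabled when the interface is up with XDP enabled.
	 */
	if_running = !test_bit(ICE_VSI_DOWN, vsi->state) &&
		     ice_is_xdp_ena_vsi(vsi);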
	if (if_running) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];

		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}

		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
		if (ret)
			goto xsk_pool_if_up;
	}
	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);
xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
		else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
}
return ret;
}
/**
 * ice_fill_rx_descs - pick buffers from XSK buffer pool and use it
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by caller of this function.
 *
 * Returns the amount of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
	dma_addr_t dma;
	u16 buffs;
	int i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
		/* Put private info that changes on a per-packet basis
		 * into xdp_buff_xsk->cb.
		 */
ice_xdp_meta_set_desc(*xdp, rx_desc);
rx_desc++;
xdp++;
}
return buffs;
}
/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
 * @count: The number of buffers to allocate
 *
 * Place the @count of descriptors onto Rx ring. Handle the ring wrap
 * for case where space from next_to_use up to the end of ring is less
 * than @count. Finally do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
				   struct xsk_buff_pool *xsk_pool, u16 count)
{
	u32 nb_buffs_extra = 0, nb_buffs = 0;
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;
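	/* Hedged sketch of the body: fill descriptors up to the end of the
	 * ring, handle the wrap, then fill the rest and bump the tail if
	 * next_to_use moved. Helper names (ICE_RX_DESC, ice_xdp_buf,
	 * ice_release_rx_desc) are assumed from the driver's conventions.
	 */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

	if (ntu + count >= rx_ring->count) {
		nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
						   rx_ring->count - ntu);
		if (nb_buffs_extra != rx_ring->count - ntu) {
			ntu += nb_buffs_extra;
			goto exit;
		}
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

exit:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}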
/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
 * @count: The number of buffers to allocate
 *
 * Wrapper for internal allocation routine; figure out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
			  struct xsk_buff_pool *xsk_pool, u16 count)
{
	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 leftover, i, tail_bumps;

	/* split the request into full threshold-sized chunks plus a leftover */
	tail_bumps = count / rx_thresh;
	leftover = count - (tail_bumps * rx_thresh);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
}
/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;
		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

skip:
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean += completed_frames;
	if (xdp_ring->next_to_clean >= cnt)
		xdp_ring->next_to_clean -= cnt;
	if (xsk_frames)
		xsk_tx_completed(xsk_pool, xsk_frames);

return completed_frames;
}
/**
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * note that this function works directly on xdp_buff, no need to convert
 * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
 * side will be able to xsk_buff_free() it.
 *
 * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there
 * was not enough space on XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
			      struct ice_tx_ring *xdp_ring,
			      struct xsk_buff_pool *xsk_pool)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct xdp_buff *head;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;
	xdp_ring->next_to_use = ntu;
	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

return ICE_XDP_TX;
busy:
xdp_ring->ring_stats->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED;
}
/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
	       struct xsk_buff_pool *xsk_pool)
{
	int err, result = ICE_XDP_PASS;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return ICE_XDP_REDIR;
		if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
			result = ICE_XDP_EXIT;
		else
			result = ICE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = ICE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		break;
	}
return result;
}
/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @xsk_pool: AF_XDP buffer pool pointer
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
			struct xsk_buff_pool *xsk_pool,
			int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u32 ntc = rx_ring->next_to_clean;
	u32 ntu = rx_ring->next_to_use;
	struct xdp_buff *first = NULL;
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	u32 cnt = rx_ring->count;
	bool failure = false;
	int entries_to_alloc;

	/* ZC path is enabled only when XDP program is set,
	 * so it cannot be NULL here
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	xdp_ring = rx_ring->xdp_ring;

	if (ntc != rx_ring->first_desc)
		first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
dma_rmb();
	if (xsk_uses_need_wakeup(xsk_pool)) {
		/* ntu could have changed when allocating entries above, so
		 * use rx_ring value instead of stack based one
		 */
		if (failure || ntc == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(xsk_pool);
		else
			xsk_clear_rx_need_wakeup(xsk_pool);
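		/* Hedged sketch of the elided return path, following the
		 * convention stated in the kernel-doc above.
		 */
		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}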
/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
			 struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;
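	/* Hedged sketch of the body: translate one AF_XDP descriptor into a
	 * HW Tx descriptor. The helpers used (xsk_buff_raw_get_dma,
	 * xsk_buff_raw_dma_sync_for_device, ICE_TX_DESC, ice_build_ctob)
	 * exist in the kernel/driver, but their use here is a reconstruction.
	 */
	dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);

	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
						      0, desc->len, 0);

	*total_bytes += desc->len;
}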
/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
			       struct xsk_buff_pool *xsk_pool,
			       struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

	unrolled_count(PKTS_PER_BATCH)
	for (i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;
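		/* Hedged sketch of the loop body, mirroring ice_xmit_pkt()
		 * above but working against the local next_to_use copy (ntu).
		 */
		dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
							      0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;
}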
/**
 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
				struct xsk_buff_pool *xsk_pool,
				struct xdp_desc *descs, u32 nb_pkts,
				unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);

	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
}
/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @xsk_pool: AF_XDP buffer pool pointer
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
{
	struct xdp_desc *descs = xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;
	int budget;

	ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);

	if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
	    !netif_running(xdp_ring->vsi->netdev))
		return true;
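	/* Hedged sketch of the elided middle: bound the budget, peek a batch
	 * of descriptors from the XSK Tx ring, fill the HW ring (handling
	 * wrap-around), then bump the tail and update stats. Helper names
	 * are assumed from the driver's conventions, not this excerpt.
	 */
	budget = ICE_DESC_UNUSED(xdp_ring);
	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

	nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
				    &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
			    nb_pkts - nb_processed, &total_bytes);

	ice_set_rs_bit(xdp_ring);
	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);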
if (xsk_uses_need_wakeup(xsk_pool))
xsk_set_tx_need_wakeup(xsk_pool);
return nb_pkts < budget;
}
/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
		return -EINVAL;

	ring = vsi->rx_rings[queue_id]->xdp_ring;

	if (!READ_ONCE(ring->xsk_pool))
		return -EINVAL;

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);
return 0;
}
/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}
/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
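	/* Hedged sketch of the elided body: free every XSK buffer still
	 * sitting between next_to_clean and next_to_use.
	 */
	while (ntc != ntu) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}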