/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (dma_unmap_len(tx_buf, len))
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);

	switch (tx_buf->type) {
	case ICE_TX_BUF_DUMMY:
		devm_kfree(ring->dev, tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_SKB:
		dev_kfree_skb_any(tx_buf->skb);
		break;
	case ICE_TX_BUF_XDP_TX:
		page_frag_free(tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_XDP_XMIT:
		xdp_return_frame(tx_buf->xdpf);
		break;
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->type = ICE_TX_BUF_EMPTY;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	/* get the bql data ready */
	netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* follow the guidelines of other drivers */
		prefetchw(&tx_buf->skb->users);

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* unmap any remaining paged data */
		if (dma_unmap_len(tx_buf, len)) {
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buf, dma),
				       dma_unmap_len(tx_buf, len),
				       DMA_TO_DEVICE);
			dma_unmap_len_set(tx_buf, len, 0);
		}

		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_tx_wake_queue(txring_txq(tx_ring));
			++tx_ring->ring_stats->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	if (xdp->data) {
		xdp_return_buff(xdp);
		xdp->data = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 * @eop_desc: Last descriptor in packet to read metadata from
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static u32
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
	    union ice_32b_rx_flex_desc *eop_desc)
{
	unsigned int ret = ICE_XDP_PASS;
	u32 act;

	if (!xdp_prog)
		goto exit;

	ice_xdp_meta_set_desc(xdp, eop_desc);

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
		if (ret == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
			goto out_failure;
		ret = ICE_XDP_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		ret = ICE_XDP_CONSUMED;
	}
exit:
	return ret;
}

/**
 * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
 * @xdpf: XDP frame that will be converted to XDP buff
 * @xdp_ring: XDP ring for transmission
 */
static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
			     struct ice_tx_ring *xdp_ring)
{
	struct xdp_buff xdp;

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns number of frames successfully sent. Failed frames
 * will be free'ed by XDP core.
 * For error cases, a negative errno code is returned and no-frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	struct ice_tx_buf *tx_buf;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		queue_index %= vsi->num_xdp_txq;
		xdp_ring = vsi->xdp_rings[queue_index];
		spin_lock(&xdp_ring->tx_lock);
	} else {
		/* Generally, should not happen */
		if (unlikely(queue_index >= vsi->num_xdp_txq))
			return -ENXIO;
		xdp_ring = vsi->xdp_rings[queue_index];
	}

	tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
	for (i = 0; i < n; i++) {
		const struct xdp_frame *xdpf = frames[i];
		int err;

	tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	if (static_branch_unlikely(&ice_xdp_locking_key))
		spin_unlock(&xdp_ring->tx_lock);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->ring_stats->rx_stats.alloc_page_failed++;
		return false;
	}

/**
 * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi.
 * @rx_ring: ring to init descriptors on
 * @count: number of descriptors to initialize
 */
void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u32 ntu = rx_ring->next_to_use;

	if (!count)
		return;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);

	do {
		rx_desc++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			ntu = 0;
		}

		rx_desc->wb.status_error0 = 0;

		count--;
	} while (count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}
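
/*
 * Illustrative use only (a sketch, not a call site copied from this file; the
 * exact macro used to size the refill is an assumption): the Rx clean path
 * typically refills however many descriptors it just consumed and folds the
 * return value into its "failure" flag so a later poll retries the refill:
 *
 *	cleaned_count = ICE_DESC_UNUSED(rx_ring);
 *	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
 *
 * A "true" return therefore means some buffers could not be replaced and more
 * allocation work remains for a subsequent pass.
 */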

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}
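
/*
 * Illustrative sketch (not part of the driver): with PAGE_SIZE == 4096 and a
 * 2048-byte Rx buffer, the XOR above simply toggles between the two halves of
 * the page, so successive calls alternate the offset between 0 and 2048:
 *
 *	offset = 0;
 *	offset ^= 2048;		// offset is now 2048, second half in use
 *	offset ^= 2048;		// offset is back to 0, first half in use
 *
 * On PAGE_SIZE >= 8192 systems the offset instead walks forward through the
 * page in "size"-byte steps until ice_can_reuse_rx_page() below decides the
 * page can no longer be reused.
 */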

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

	/* if we are only owner of page we can reuse it */
	if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
		return false;
#if (PAGE_SIZE >= 8192)
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE >= 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
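
/*
 * Page-reuse bookkeeping in a nutshell (illustrative numbers, not code from
 * this file): the driver front-loads a large reference bias on each Rx page so
 * the hot path only touches the local pagecnt_bias instead of the atomic page
 * refcount. Roughly:
 *
 *	page_ref_add(page, USHRT_MAX - 1);	// page refcount ~= USHRT_MAX
 *	rx_buf->pagecnt_bias = USHRT_MAX;	// driver "owns" all of them
 *
 *	// each buffer handed up the stack costs one: pagecnt_bias--;
 *	// the page stays reusable while page refcount minus pagecnt_bias
 *	// is at most 1, i.e. the stack has dropped every reference it got.
 *
 * The restock in ice_can_reuse_rx_page() simply tops the bias back up once it
 * has been almost fully consumed, so the atomic refcount is touched rarely.
 */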

/**
 * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp buff to place the data into
 * @rx_buf: buffer containing page to add
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the xdp buf.
 * It will just attach the page as a frag.
 */
static int
ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
		 struct ice_rx_buf *rx_buf, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

	if (page_is_pfmemalloc(rx_buf->page))
		xdp_buff_set_frag_pfmemalloc(xdp);

	return 0;
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @ntc: index of next to clean element
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       const unsigned int ntc)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[ntc];

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_get_pgcnts - grab page_count() for gathered fragments
 * @rx_ring: Rx descriptor ring to store the page counts on
 * @ntc: the next to clean element (not included in this frame!)
 *
 * This function is intended to be called right before running XDP
 * program so that the page recycling mechanism will be able to take
 * a correct decision regarding underlying pages; this is done in such
 * way as XDP program can change the refcount of page
 */
static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
{
	u32 idx = rx_ring->first_desc;
	struct ice_rx_buf *rx_buf;
	u32 cnt = rx_ring->count;

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing XDP buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead. Driver has
 * already combined frags (if any) to skb_shared_info.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	unsigned int nr_frags;
	struct sk_buff *skb;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
	if (unlikely(!skb))
		return NULL;

	/* must record the Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	if (unlikely(xdp_buff_has_frags(xdp)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdp->frame_sz,
					   xdp_buff_is_frag_pfmemalloc(xdp));

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	struct skb_shared_info *sinfo = NULL;
	struct ice_rx_buf *rx_buf;
	unsigned int nr_frags = 0;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
	if (unlikely(!skb))
		return NULL;

	rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
		/* besides adding here a partial frag, we are going to add
		 * frags from xdp_buff, make sure there is enough space for
		 * them
		 */
		if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size,
				xdp->frame_sz);
	} else {
		/* buffer is unused, restore biased page count in Rx buffer;
		 * data was copied onto skb's linear part so there's no
		 * need for adjusting page offset and we can reuse this buffer
		 * as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	if (unlikely(xdp_buff_has_frags(xdp))) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will either
 * recycle the buffer or unmap it and free the associated resources.
 */
static void
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}

/**
 * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
 * @rx_ring: Rx ring with all the auxiliary data
 * @xdp: XDP buffer carrying linear + frags part
 * @ntc: the next to clean element (not included in this frame!)
 * @verdict: return code from XDP program execution
 *
 * Called after XDP program is completed, or on error with verdict set to
 * ICE_XDP_CONSUMED.
 *
 * Walk through buffers from first_desc to the end of the frame, releasing
 * buffers and satisfying internal page recycle mechanism. The action depends
 * on verdict from XDP program.
 */
static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
			    u32 ntc, u32 verdict)
{
	u32 idx = rx_ring->first_desc;
	u32 cnt = rx_ring->count;
	struct ice_rx_buf *buf;
	u32 xdp_frags = 0;
	int i = 0;

	if (unlikely(xdp_buff_has_frags(xdp)))
		xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;

	while (idx != ntc) {
		buf = &rx_ring->rx_buf[idx];
		if (++idx == cnt)
			idx = 0;

		/* An XDP program could release fragments from the end of the
		 * buffer. For these, we need to keep the pagecnt_bias as-is.
		 * To do this, only adjust pagecnt_bias for fragments up to
		 * the total remaining after the XDP program has run.
		 */
		if (verdict != ICE_XDP_CONSUMED)
			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
		else if (i++ <= xdp_frags)
			buf->pagecnt_bias++;

		ice_put_rx_buf(rx_ring, buf);
	}

	xdp->data = NULL;
	rx_ring->first_desc = ntc;
}

/**
 * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
 * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
 *
 * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
 * to set flow director rules on VFs.
 */
void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean;
	unsigned int total_rx_pkts = 0;
	u32 cnt = rx_ring->count;

	while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
		struct ice_vsi *ctrl_vsi = rx_ring->vsi;
		union ice_32b_rx_flex_desc *rx_desc;
		u16 stat_err_bits;

		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		dma_rmb();

		if (ctrl_vsi->vf)
			ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	unsigned int offset = rx_ring->rx_offset;
	struct xdp_buff *xdp = &rx_ring->xdp;
	struct ice_tx_ring *xdp_ring = NULL;
	struct bpf_prog *xdp_prog = NULL;
	u32 ntc = rx_ring->next_to_clean;
	u32 cached_ntu, xdp_verdict;
	u32 cnt = rx_ring->count;
	u32 xdp_xmit = 0;
	bool failure;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tci;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

	/* if dim settings get stale, like when not updated for 1
	 * second or longer, force it to start again. This addresses the
	 * frequent case of an idle queue being switched to by the
	 * scheduler. The 1,000 here means 1,000 milliseconds.
	 */
	if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
		rc->dim.state = DIM_START_MEASURE;
}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
static void ice_net_dim(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;

	if (ITR_IS_DYNAMIC(tx)) {
		struct dim_sample dim_sample;

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

/**
 * ice_enable_interrupt - re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * If the VSI is down, the interrupt will not be re-enabled. Also,
 * when enabling the interrupt always reset the wb_on_itr to false
 * and trigger a software interrupt to clean out internal state.
 */
static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;
	bool wb_en = q_vector->wb_on_itr;
	u32 itr_val;

	if (test_bit(ICE_DOWN, vsi->state))
		return;

	/* trigger an ITR delayed software interrupt when exiting busy poll, to
	 * make sure to catch any pending cleanups that might have been missed
	 * due to interrupt state transition. If busy poll or poll isn't
	 * enabled, then don't update ITR, and just enable the interrupt.
	 */
	if (!wb_en) {
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	} else {
		q_vector->wb_on_itr = false;

		/* do two things here with a single write. Set up the third ITR
		 * index to be used for software interrupt moderation, and then
		 * trigger a software interrupt with a rate limit of 20K on
		 * software interrupts, this will help avoid high interrupt
		 * loads due to frequently polling and exiting polling.
		 */
		itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
			   ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
	}
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode no need to change it */
	if (q_vector->wb_on_itr)
		return;

	/* use previously set ITR values for all of the ITR indices by
	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
	 * be static in non-adaptive mode (user configured)
	 */
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     FIELD_PREP(GLINT_DYN_CTL_ITR_INDX_M, ICE_ITR_NONE) |
	     FIELD_PREP(GLINT_DYN_CTL_INTENA_MSK_M, 1) |
	     FIELD_PREP(GLINT_DYN_CTL_WB_ON_ITR_M, 1));

	q_vector->wb_on_itr = true;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	bool clean_complete = true;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
		bool wd;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = rx_ring->xsk_pool ?
			  ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
			  ice_clean_rx_irq(rx_ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		/* Set the writeback on ITR so partial completions of
		 * cache-lines will still continue even if we're polling.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (napi_complete_done(napi, work_done)) {
		ice_net_dim(q_vector);
		ice_enable_interrupt(q_vector);
	} else {
		ice_set_wb_on_itr(q_vector);
	}

	return min_t(int, work_done, budget - 1);
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	netif_tx_stop_queue(txring_txq(tx_ring));
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_tx_start_queue(txring_txq(tx_ring));
	++tx_ring->ring_stats->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}
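
/*
 * Explanatory note (not from the original source): the smp_mb() in
 * __ice_maybe_stop_tx() pairs with the smp_mb() in ice_clean_tx_irq() above.
 * The stop path publishes the stopped queue state before re-checking the
 * free-descriptor count, and the clean path publishes the new next_to_clean
 * before testing whether the queue is stopped, so at least one side is always
 * guaranteed to observe the other and the queue cannot be left stopped while
 * room is available.
 */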

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;
	bool kick;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
				      netdev_xmit_more());
	if (kick)
		/* notify HW of packet */
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	/* set the tx_flags to indicate the IP protocol type. this is
	 * required so that checksum header computation below is accurate.
	 */
	if (ip.v4->version == 4)
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (ip.v6->version == 6)
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;

	/* indicate if we need to offload outer UDP header */
	if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
	    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
		tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

	/* record tunnel offload values */
	off->cd_tunnel_params |= tunnel;

	/* set DTYP=1 to indicate that it's a Tx context descriptor
	 * in IPsec tunnel mode with Tx offloads in Quad word 1
	 */
	off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

	/* switch L4 header pointer from outer to inner */
	l4.hdr = skb_inner_transport_header(skb);
	l4_proto = 0;

	/* reset type as we transition from outer to inner headers */
	first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
	if (ip.v4->version == 4)
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	if (ip.v6->version == 6)
		first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 */
static void
ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* the VLAN ethertype/tpid is determined by VSI configuration and netdev
	 * feature flags, which the driver only allows either 802.1Q or 802.1ad
	 * VLAN offloads exclusively so we only care about the VLAN ID here
	 */
	if (skb_vlan_tag_present(skb)) {
		first->vid = skb_vlan_tag_get(skb);
		if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
			first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
		else
			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	__be16 protocol;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
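
/*
 * Worked example (illustrative only, not part of the driver; assumes
 * ICE_DESCS_FOR_SKB_DATA_PTR == 1):
 *
 *	size =  4096: ( 4096 * 85) >> 20 = 0  ->  1 descriptor
 *	size = 16384: (16384 * 85) >> 20 = 1  ->  2 descriptors
 *	size = 65536: (65536 * 85) >> 20 = 5  ->  6 descriptors
 *
 * Just past 12K (e.g. size = 12289) the result is still 1 descriptor; this is
 * the deliberate underestimate described above, absorbed by the 4K - 1 of
 * slack each descriptor has beyond the assumed 12K.
 */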

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{