// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */
#include #include"gve_adminq. (, &data_slot->addr)java.lang.StringIndexOutOfBoundsException: Index 49 out of bounds for length 49 #struct =0java.lang.StringIndexOutOfBoundsException: Index 31 out of bounds for length 31 #include <u3 =; #include <linux #include <net/xdp desc>.desc_ring]java.lang.StringIndexOutOfBoundsException: Index 53 out of bounds for length 53 #include net.hjava.lang.StringIndexOutOfBoundsException: Index 29 out of bounds for length 29
static void gve_rx_free_buffer(struct device *dev,
                               struct gve_rx_slot_page_info *page_info,
                               union gve_rx_data_slot *data_slot)
{
        dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
                                      GVE_DATA_SLOT_ADDR_PAGE_MASK);

        page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
        gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
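/* Illustrative sketch, not part of the driver: a data slot stores the
 * buffer's DMA address as a big-endian u64. Each page backs two half-page
 * buffers, so the slot may point at either half; masking with
 * GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1)) strips the intra-page
 * offset and recovers the page-aligned address needed to unmap and free the
 * whole page. The gve_example_ name and the addresses in the comment are
 * hypothetical.
 */
static __maybe_unused dma_addr_t gve_example_slot_to_page_dma(__be64 slot_addr)
{
        /* With 4K pages, a slot pointing at the second half of a page,
         * e.g. 0x123456789800, masks back to the page base 0x123456789000.
         */
        return (dma_addr_t)(be64_to_cpu(slot_addr) &
                            GVE_DATA_SLOT_ADDR_PAGE_MASK);
}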
static void gve_rx_unfill_pages(struct gve_priv *priv,
                                struct gve_rx_ring *rx,
                                struct gve_rx_alloc_rings_cfg *cfg)
{
        u32 slots = rx->mask + 1;
        int i;

        if (rx->data.raw_addressing) {
                for (i = 0; i < slots; i++)
                        gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
                                           &rx->data.data_ring[i]);
        } else {
                for (i = 0; i < slots; i++)
                        page_ref_sub(rx->data.page_info[i].page,
                                     rx->data.page_info[i].pagecnt_bias - 1);

                for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
                        page_ref_sub(rx->qpl_copy_pool[i].page,
                                     rx->qpl_copy_pool[i].pagecnt_bias - 1);
                        put_page(rx->qpl_copy_pool[i].page);
                }
        }
        kvfree(rx->data.page_info);
        rx->data.page_info = NULL;
}
static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
        ctx->skb_head = NULL;
        ctx->skb_tail = NULL;
        ctx->total_size = 0;
        ctx->frag_cnt = 0;
        ctx->drop_pkt = false;
}
void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
                          struct gve_rx_alloc_rings_cfg *cfg)
{
        struct device *dev = &priv->pdev->dev;
        u32 slots = rx->mask + 1;
        int idx = rx->q_num;
        size_t bytes;
        u32 qpl_id;

        if (rx->desc.desc_ring) {
                bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
                dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
                rx->desc.desc_ring = NULL;
        }

        if (rx->data.data_ring) {
                bytes = sizeof(*rx->data.data_ring) * slots;
                dma_free_coherent(dev, bytes, rx->data.data_ring,
                                  rx->data.data_bus);
                rx->data.data_ring = NULL;
        }

        if (!rx->data.raw_addressing) {
                qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
                gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
                rx->data.qpl = NULL;
        }

        netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
                                struct gve_rx_alloc_rings_cfg *cfg)
{
        struct gve_priv *priv = rx->gve;
        u32 slots;
        int err;
        int i;
        int j;

        /* Allocate one page per Rx queue slot. Each page is split into two
         * packet buffers, when possible we "page flip" between the two.
         */
        slots = rx->mask + 1;

        rx->data.page_info = kvcalloc_node(slots, sizeof(*rx->data.page_info),
                                           GFP_KERNEL, priv->numa_node);
        if (!rx->data.page_info)
                return -ENOMEM;

        for (i = 0; i < slots; i++) {
                if (!rx->data.raw_addressing) {
                        struct page *page = rx->data.qpl->pages[i];
                        dma_addr_t addr = i * PAGE_SIZE;

                        /* The page already has 1 ref. */
                        page_ref_add(page, INT_MAX - 1);
                        rx->data.page_info[i].pagecnt_bias = INT_MAX;
                        gve_setup_rx_buffer(&rx->data.page_info[i],
                                            &rx->data.data_ring[i], addr, page,
                                            rx->packet_buffer_size);
                        continue;
                }

                err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
                                          &rx->data.page_info[i],
                                          &rx->data.data_ring[i], rx);
                if (err)
                        goto alloc_err_rda;
        }

        if (!rx->data.raw_addressing) {
                for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
                        struct page *page = alloc_page(GFP_KERNEL);

                        if (!page) {
                                err = -ENOMEM;
                                goto alloc_err_qpl;
                        }

                        rx->qpl_copy_pool[j].page = page;
                        rx->qpl_copy_pool[j].page_offset = 0;
                        rx->qpl_copy_pool[j].page_address = page_address(page);

                        /* The page already has 1 ref. */
                        page_ref_add(page, INT_MAX - 1);
                        rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
                }
        }

        return slots;
alloc_err_qpl:
        /* Fully free the copy pool pages. */
        while (j--) {
                page_ref_sub(rx->qpl_copy_pool[j].page,
                             rx->qpl_copy_pool[j].pagecnt_bias - 1);
                put_page(rx->qpl_copy_pool[j].page);
        }

        /* Do not fully free QPL pages - only remove the bias added in this
         * function with gve_setup_rx_buffer.
         */
        while (i--)
                page_ref_sub(rx->data.page_info[i].page,
                             rx->data.page_info[i].pagecnt_bias - 1);

        return err;

alloc_err_rda:
        while (i--)
                gve_rx_free_buffer(&priv->pdev->dev,
                                   &rx->data.page_info[i],
                                   &rx->data.data_ring[i]);
        return err;
}
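/* Illustrative sketch, not part of the driver: the pagecnt_bias scheme used
 * above in one place. Grabbing INT_MAX references up front means handing a
 * buffer to the stack only costs a local bias decrement rather than an
 * atomic ref per packet; teardown subtracts the remaining bias and drops the
 * original reference. The gve_example_ name is hypothetical.
 */
static __maybe_unused void gve_example_bias_lifecycle(struct page *page)
{
        int pagecnt_bias;

        /* A freshly allocated page already holds one reference. */
        page_ref_add(page, INT_MAX - 1);
        pagecnt_bias = INT_MAX;         /* refcount == bias: we own it all */

        /* Hand one buffer to the stack: pay one unit of bias ... */
        pagecnt_bias--;
        /* ... and the stack eventually puts that reference for us. */
        put_page(page);

        /* Teardown: remove the remaining bias, then drop the original ref. */
        page_ref_sub(page, pagecnt_bias - 1);
        put_page(page);
}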
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{
        int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);

        gve_rx_add_to_block(priv, idx);
        gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
                        // We will never chain more than two SKBs: 2 * 16 * 2k > 64k
                        // which is why we do not need to chain by using skb->next
                        skb_shinfo(ctx->skb_tail)->frag_list = skb;
/* "flip" to other packet buffer on this page */
page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
*(slot_addr) ^= offset;
}
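/* Worked example for gve_rx_flip_buff() (illustration only): with
 * GVE_DEFAULT_RX_BUFFER_OFFSET == 2048 (0x800, half a 4K page), XOR toggles
 * between the two half-page buffers with no branch:
 *
 *   page_offset 0x000 ^ 0x800 -> 0x800   (first half  -> second half)
 *   page_offset 0x800 ^ 0x800 -> 0x000   (second half -> first half)
 *
 * The same bit pattern is XORed into the big-endian slot address, so the
 * device's view of the buffer address flips in lockstep with the driver's
 * page_offset.
 */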
static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
        int pagecount = page_count(page_info->page);

        /* This page is not being used by any SKBs - reuse */
        if (pagecount == page_info->pagecnt_bias)
                return 1;
        /* This page is still being used by an SKB - we can't reuse */
        else if (pagecount > page_info->pagecnt_bias)
                return 0;
        WARN(pagecount < page_info->pagecnt_bias,
             "Pagecount should never be less than the bias.");
        return -1;
}
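/* Concrete illustration (not part of the driver) of the tri-state above,
 * with a hypothetical pagecnt_bias of 10:
 *
 *   page_count == 10  ->  1: every reference is the driver's, safe to reuse
 *   page_count == 11  ->  0: an SKB still holds the page, don't reuse
 *   page_count ==  9  -> -1: more refs dropped than handed out: refcount
 *                            bug, so the caller schedules a device reset
 */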
        /* Optimistically stop the kernel from freeing the page.
         * We will check again in refill to determine if we need to alloc a
         * new page.
         */
        gve_dec_pagecnt_bias(page_info);
        if (alloc_page) {
                struct gve_rx_slot_page_info alloc_page_info;
                struct page *page;

                /* The least recently used page turned out to be
                 * still in use by the kernel. Ignoring it and moving
                 * on alleviates head-of-line blocking.
                 */
                rx->qpl_copy_pool_head++;

                page = alloc_page(GFP_ATOMIC);
                if (!page)
                        return NULL;
        }

        if (copy_page_info->can_flip) {
                /* We have used both halves of this copy page, it
                 * is time for it to go to the back of the queue.
                 */
                copy_page_info->can_flip = false;
                rx->qpl_copy_pool_head++;
                prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head &
                                           rx->qpl_copy_pool_mask].page);
        } else {
                copy_page_info->can_flip = true;
        }
        /* if raw_addressing mode is not enabled gvnic can only receive into
         * registered segments. If the buffer can't be recycled, our only
         * choice is to copy the data out of it so that we can return it to the
         * device.
         */
        if (page_info->can_flip) {
                skb = gve_rx_add_frags(napi, page_info, page_info->buf_size,
                                       len, ctx);
                /* No point in recycling if we didn't get the skb */
                if (skb) {
                        /* Make sure that the page isn't freed. */
                        gve_dec_pagecnt_bias(page_info);
                        gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
                }
        } else {
                skb = gve_rx_copy_to_pool(rx, page_info, len, napi);
        }
        return skb;
}
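/* Sketch of the copy-pool indexing behind gve_rx_copy_to_pool() above
 * (illustration only; assumes the pool size is a power of two, which is why
 * a free-running head can simply be masked on use):
 */
#if 0
        u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask;
        struct gve_rx_slot_page_info *copy_page_info =
                                        &rx->qpl_copy_pool[pool_idx];

        /* Each pool page serves two copies, one per half: the first copy
         * sets can_flip, the second clears it and advances the head so the
         * page moves to the back of the queue while its halves drain.
         */
#endif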
        if (is_first_frag) {
                if (likely(feat & NETIF_F_RXCSUM)) {
                        /* NIC passes up the partial sum */
                        if (desc->csum)
                                skb->ip_summed = CHECKSUM_COMPLETE;
                        else
                                skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = csum_unfold(desc->csum);
                }

                /* parse flags & pass relevant info up */
                if (likely(feat & NETIF_F_RXHASH) &&
                    gve_needs_rss(desc->flags_seq))
                        skb_set_hash(skb, be32_to_cpu(desc->rss_hash),
                                     gve_rss_type(desc->flags_seq));
        }

        if (is_last_frag) {
                skb_record_rx_queue(skb, rx->q_num);
                if (skb_is_nonlinear(skb))
                        napi_gro_frags(napi);
                else
                        napi_gro_receive(napi, skb);
                goto finish_ok_pkt;
        }
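        /* Note (illustration only): CHECKSUM_COMPLETE tells the stack that
         * skb->csum already holds a checksum over the whole payload, so any
         * L4 protocol can be validated by folding that sum rather than
         * recomputing it; CHECKSUM_NONE forces full software verification.
         */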
                page_info = &rx->data.page_info[idx];
                if (page_info->can_flip) {
                        /* The other half of the page is free because it was
                         * free when we processed the descriptor. Flip to it.
                         */
                        union gve_rx_data_slot *data_slot =
                                                &rx->data.data_ring[idx];

                        gve_rx_flip_buff(page_info, &data_slot->addr);
                        page_info->can_flip = 0;
                } else {
                        /* It is possible that the networking stack has already
                         * finished processing all outstanding packets in the
                         * buffer and it can be reused. Flipping is unnecessary
                         * here - if the networking stack still owns half the
                         * page it is impossible to tell which half. Either the
                         * whole page is free or it needs to be replaced.
                         */
                        int recycle = gve_rx_can_recycle_buffer(page_info);

                        if (recycle < 0) {
                                if (!rx->data.raw_addressing)
                                        gve_schedule_reset(priv);
                                return false;
                        }
                        if (!recycle) {
                                /* We can't reuse the buffer - alloc a new one */
                                union gve_rx_data_slot *data_slot =
                                                &rx->data.data_ring[idx];
                                struct device *dev = &priv->pdev->dev;

                                gve_rx_free_buffer(dev, page_info, data_slot);
                                page_info->page = NULL;
                                if (gve_rx_alloc_buffer(priv, dev, page_info,
                                                        data_slot, rx))
                                        break;
                        }
                }
                fill_cnt++;
        }
        rx->fill_cnt = fill_cnt;
        return true;
}
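/* Ring accounting sketch (illustration only): cnt (consumed) and fill_cnt
 * (posted) are free-running u32 counters, so unsigned subtraction counts
 * outstanding buffers correctly even across wraparound, and masking yields
 * the slot index. For a hypothetical ring with mask == 1023:
 *
 *   fill_cnt = 0xffffffff, cnt = 0xfffffffd
 *   fill_cnt - cnt      = 2      (buffers posted but not yet consumed)
 *   fill_cnt & rx->mask = 1023   (slot index of the latest post)
 */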
        if (xdp_txs != rx->xdp_actions[XDP_TX])
                gve_xdp_tx_flush(priv, rx->q_num);

        if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
                xdp_do_flush();

        /* restock ring slots */
        if (!rx->data.raw_addressing) {
                /* In QPL mode buffs are refilled as the desc are processed */
                rx->fill_cnt += work_done;
        } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
                /* In raw addressing mode buffs are only refilled if the avail
                 * falls below a threshold.
                 */
                if (!gve_rx_refill_buffers(priv, rx))
                        return 0;

                /* If we were not able to completely refill buffers, we'll want
                 * to schedule this queue for work again to refill buffers.
                 */
                if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
                        gve_rx_write_doorbell(priv, rx);
                        return budget;
                }
        }

        gve_rx_write_doorbell(priv, rx);
        return work_done;
}
int gve_rx_poll(struct gve_notify_block *block, int budget)
{
        struct gve_rx_ring *rx = block->rx;
        netdev_features_t feat;
        int work_done = 0;

        feat = block->napi.dev->features;

        if (budget > 0)
                work_done = gve_clean_rx_done(rx, budget, feat);

        return work_done;
}
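/* NAPI contract sketch (illustration only): returning budget from
 * gve_rx_poll() signals that the ring may still have work, so the core keeps
 * polling; returning less than budget lets the caller complete NAPI and
 * re-arm the interrupt. A minimal caller, with hypothetical gve_example_
 * names:
 */
#if 0
static int gve_example_napi_poll(struct napi_struct *napi, int budget)
{
        struct gve_notify_block *block =
                        container_of(napi, struct gve_notify_block, napi);
        int work_done = gve_rx_poll(block, budget);

        if (work_done < budget && napi_complete_done(napi, work_done))
                gve_example_irq_rearm(block);   /* hypothetical re-arm */

        return work_done;
}
#endif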