/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)
/* Discard packet, if instructed to do so.  Process the
 * previous receive first.
 */
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
	efx_rx_flush_packet(channel);
	efx_discard_rx_packet(channel, rx_buf, n_frags);
	return;
}

if (n_frags > 1) {
	/* Release/sync DMA mapping for additional fragments.
	 * Fix length for last fragment.
	 */
	/* FIX: original had "unsignedint" (missing space) — compile error */
	unsigned int tail_frags = n_frags - 1;

	for (;;) {
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
		if (--tail_frags == 0)
			break;
		efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
	}
	/* Last fragment holds whatever is left after the full-size ones */
	rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}

/* All fragments have been DMA-synced, so recycle pages. */
rx_buf = efx_rx_buffer(rx_queue, index);
efx_recycle_rx_pages(channel, rx_buf, n_frags);

/* Pipeline receives so that we give time for packet headers to be
 * prefetched into cache.
 */
efx_rx_flush_packet(channel);
channel->rx_pkt_n_frags = n_frags;
channel->rx_pkt_index = index;
}
/* Set the SKB flags */
skb_checksum_none_assert(skb);
/* If hardware validated the checksum, tell the stack not to redo it */
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
}

efx_rx_skb_attach_timestamp(channel, skb);

/* Channel-specific receive hook may consume the skb entirely */
if (channel->type->receive_skb)
	if (channel->type->receive_skb(channel, skb))
		return;

/* Pass the packet up */
if (channel->rx_list != NULL)
	/* Add to list, will pass up later */
	list_add_tail(&skb->list, channel->rx_list);
else
	/* No list, so pass it up now */
	netif_receive_skb(skb);
}
/** efx_do_xdp: perform XDP processing on a received packet * * Returns true if packet should still be delivered.
*/ staticbool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, struct efx_rx_buffer *rx_buf, u8 **ehp)
{
u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE]; struct efx_rx_queue *rx_queue; struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; struct xdp_buff xdp;
u32 xdp_act;
s16 offset; int err;
xdp_prog = rcu_dereference_bh(efx->xdp_prog); if (!xdp_prog) returntrue;
rx_queue = efx_channel_get_rx_queue(channel);
if (unlikely(channel->rx_pkt_n_frags > 1)) { /* We can't do XDP on fragmented packets - drop. */
efx_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags); if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev, "XDP is not possible with multiple receive fragments (%d)\n",
channel->rx_pkt_n_frags);
channel->n_rx_xdp_bad_drops++; returnfalse;
}
/* Read length from the prefix if necessary. This already * excludes the length of the prefix itself.
*/ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) {
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset)); /* A known issue may prevent this being filled in; * if that happens, just drop the packet. * Must do that in the driver since passing a zero-length * packet up to the stack may cause a crash.
*/ if (unlikely(!rx_buf->len)) {
efx_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
channel->n_rx_frm_trunc++; goto out;
}
}
/* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here
*/ if (unlikely(efx->loopback_selftest)) {
efx_loopback_rx_packet(efx, eh, rx_buf->len);
efx_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags); goto out;
}
/* NOTE(review): the following text is not C code — it appears to be
 * accidentally pasted German website boilerplate and should be removed
 * from this source file.  English translation, preserved for reference:
 * "The information on this website was carefully compiled to the best of
 * our knowledge.  However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed.  Note: the colored syntax
 * highlighting and the measurement are still experimental."
 */