/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
	/* Remember the previous tail-aligned next_to_use so we can tell
	 * whether this update actually crosses an 8-descriptor boundary.
	 */
	u16 rounded_prev = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
	val &= ~0x7;
	if (rounded_prev == val)
		return;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/** * ice_get_rx_hash - get RX hash value from descriptor * @rx_desc: specific descriptor * * Returns hash, if present, 0 otherwise.
*/ static u32 ice_get_rx_hash(constunion ice_32b_rx_flex_desc *rx_desc)
{ conststruct ice_32b_rx_flex_desc_nic *nic_mdid;
if (unlikely(rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)) return 0;
/**
 * ice_rx_hash_to_skb - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
		   const union ice_32b_rx_flex_desc *rx_desc,
		   struct sk_buff *skb, u16 rx_ptype)
{
	struct libeth_rx_pt decoded;
	u32 hash;

	/* skip entirely when the netdev/ptype combination has no hash */
	decoded = libie_rx_pt_parse(rx_ptype);
	if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded))
		return;

	/* a zero hash means the descriptor carried none */
	hash = ice_get_rx_hash(rx_desc);
	if (likely(hash))
		libeth_rx_pt_set_hash(skb, hash, decoded);
}
/** * ice_rx_gcs - Set generic checksum in skb * @skb: skb currently being received and modified * @rx_desc: receive descriptor
*/ staticvoid ice_rx_gcs(struct sk_buff *skb, constunion ice_32b_rx_flex_desc *rx_desc)
{ conststruct ice_32b_rx_flex_desc_nic *desc;
u16 csum;
/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct libeth_rx_pt decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;

	decoded = libie_rx_pt_parse(ptype);
	if (!libeth_rx_pt_has_checksum(ring->netdev, decoded))
		return;

	/* NOTE(review): the source dropped the lines that initialize
	 * rx_status0/rx_status1 and ipv4/ipv6 (they were read while
	 * uninitialized — UB) and the checksum_fail label targeted by the
	 * gotos below. Reconstructed from the upstream ice driver — confirm
	 * against the canonical tree.
	 */
	if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
	    rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
	    (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
	     decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
	     decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
		ice_rx_gcs(skb, rx_desc);
		return;
	}

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
		ring->vsi->back->hw_rx_eipe_error++;
		return;
	}

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))))
		goto checksum_fail;

	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}
/** * ice_ptp_rx_hwts_to_skb - Put RX timestamp into skb * @rx_ring: Ring to get the VSI info * @rx_desc: Receive descriptor * @skb: Particular skb to send timestamp with * * The timestamp is in ns, so we must convert the result first.
*/ staticvoid
ice_ptp_rx_hwts_to_skb(struct ice_rx_ring *rx_ring, constunion ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u64 ts_ns = ice_ptp_get_rx_hwts(rx_desc, &rx_ring->pkt_ctx);
/** * ice_get_ptype - Read HW packet type from the descriptor * @rx_desc: RX descriptor
*/ static u16 ice_get_ptype(constunion ice_32b_rx_flex_desc *rx_desc)
{ return le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
}
/** * ice_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: Rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb.
*/ void
ice_process_skb_fields(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u16 ptype = ice_get_ptype(rx_desc);
ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */ if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) { struct net_device *netdev = ice_eswitch_get_target(rx_ring,
rx_desc);
if (rx_ring->ptp_rx)
ice_ptp_rx_hwts_to_skb(rx_ring, rx_desc, skb);
}
/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tci: VLAN TCI for packet
 *
 * This function sends the completed packet (via. skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci)
{
	if ((vlan_tci & VLAN_VID_MASK) && rx_ring->vlan_proto)
		__vlan_hwaccel_put_tag(skb, rx_ring->vlan_proto,
				       vlan_tci);

	/* NOTE(review): the GRO hand-off below was truncated in the source;
	 * reconstructed from the upstream ice driver — confirm against the
	 * canonical tree.
	 */
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
/* NOTE(review): orphan fragment — the signature and most of the body of this
 * XDP-transmit function are missing from this chunk; only the tail of its
 * descriptor-unwind path and the "busy" (ring full) exit are visible. Left
 * byte-identical; locate the full function before editing.
 */
if (!ntu)
	ntu += cnt;
ntu--;
} return ICE_XDP_CONSUMED;
/* ring had no free descriptors for this frame */
busy:
	xdp_ring->ring_stats->tx_stats.tx_busy++;
	return ICE_XDP_CONSUMED;
}
/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @xdp_ring: XDP ring
 * @xdp_res: Result of the receive batch
 * @first_idx: index to write from caller
 *
 * This function bumps XDP Tx tail and/or flush redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 */
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
			 u32 first_idx)
{
	struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];

	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush();

	if (xdp_res & ICE_XDP_TX) {
		/* rings may be shared between CPUs when XDP locking is on */
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		/* store index of descriptor with RS bit set in the first
		 * ice_tx_buf of given NAPI batch
		 */
		tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
		ice_xdp_ring_update_tail(xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
	}
}
*ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
xdp_ext->pkt_ctx); if (!*ts_ns) return -ENODATA;
return 0;
}
/** * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor * @eop_desc: End of Packet descriptor
*/ staticenum xdp_rss_hash_type
ice_xdp_rx_hash_type(constunion ice_32b_rx_flex_desc *eop_desc)
{ return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type;
}
/**
 * ice_xdp_rx_hash - RX hash XDP hint handler
 * @ctx: XDP buff pointer
 * @hash: hash destination address
 * @rss_type: XDP hash type destination address
 *
 * Copy RX hash (if available) and its type to the destination address.
 */
static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
			   enum xdp_rss_hash_type *rss_type)
{
	const struct ice_xdp_buff *xdp_ext = (void *)ctx;

	*hash = ice_get_rx_hash(xdp_ext->eop_desc);
	*rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
	/* zero hash == descriptor carried none; type is still reported */
	if (!likely(*hash))
		return -ENODATA;

	return 0;
}
/**
 * ice_xdp_rx_vlan_tag - VLAN tag XDP hint handler
 * @ctx: XDP buff pointer
 * @vlan_proto: destination address for VLAN protocol
 * @vlan_tci: destination address for VLAN TCI
 *
 * Copy VLAN tag (if was stripped) and corresponding protocol
 * to the destination address.
 */
static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
			       u16 *vlan_tci)
{
	const struct ice_xdp_buff *xdp_ext = (void *)ctx;

	*vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
	if (!*vlan_proto)
		return -ENODATA;

	*vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
	if (!*vlan_tci)
		return -ENODATA;

	/* NOTE(review): the source was truncated here — the success return
	 * and closing brace were missing; restored per the sibling hint
	 * handlers above.
	 */
	return 0;
}
/* NOTE(review): the text below is website-extraction residue (a German
 * disclaimer), not driver code; preserved inside a comment so the file
 * remains compilable:
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell."
 */