/* This smp_store_release() pairs with smp_load_acquire() in * hbg_napi_tx_recycle() called in tx interrupt handle process.
*/
smp_store_release(&ring->ntu, hbg_queue_next_prt(ntu, ring));
dev_sw_netstats_tx_add(netdev, 1, skb->len); return NETDEV_TX_OK;
}
staticvoid hbg_buffer_free_skb(struct hbg_buffer *buffer)
{ if (unlikely(!buffer->skb)) return;
/* NAPI poll handler for the TX ring: reclaim descriptors the hardware
 * has finished transmitting, publish the new clean index, wake the TX
 * queue, and re-enable the TX interrupt once the ring is drained.
 *
 * Returns the number of packets recycled this poll.
 *
 * NOTE(review): reconstructed — the extraction fused "staticint",
 * dropped the loop tail that releases the buffer and advances the
 * ring (without it the loop makes no progress), and dropped the
 * closing brace plus the required return of this non-void function.
 * Confirm against the original driver.
 */
static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget)
{
	struct hbg_ring *ring = container_of(napi, struct hbg_ring, napi);
	/* This smp_load_acquire() pairs with smp_store_release() in
	 * hbg_net_start_xmit() called in xmit process.
	 */
	u32 ntu = smp_load_acquire(&ring->ntu);
	struct hbg_priv *priv = ring->priv;
	struct hbg_buffer *buffer;
	u32 ntc = ring->ntc;
	int packet_done = 0;

	/* We need do cleanup even if budget is 0.
	 * Per NAPI documentation budget is for Rx.
	 * So We hardcode the amount of work Tx NAPI does to 128.
	 */
	budget = 128;
	while (packet_done < budget) {
		if (unlikely(hbg_queue_is_empty(ntc, ntu, ring)))
			break;

		/* make sure HW write desc complete */
		dma_rmb();

		buffer = &ring->queue[ntc];
		if (buffer->state != HBG_TX_STATE_COMPLETE)
			break;

		/* NOTE(review): reconstructed loop tail — release the
		 * completed buffer and advance to the next descriptor.
		 */
		hbg_buffer_free_skb(buffer);
		ntc = hbg_queue_next_prt(ntc, ring);
		packet_done++;
	}

	/* This smp_store_release() pairs with smp_load_acquire() in
	 * hbg_net_start_xmit() called in xmit process.
	 */
	smp_store_release(&ring->ntc, ntc);
	netif_wake_queue(priv->netdev);

	/* Ring drained before the budget ran out: leave polling mode and
	 * let the TX interrupt drive the next reclaim cycle.
	 */
	if (likely(packet_done < budget &&
		   napi_complete_done(napi, packet_done)))
		hbg_hw_irq_enable(priv, HBG_INT_MSK_TX_B, true);

	return packet_done;
}
if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
!FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4))) returntrue;
switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) { case HBG_L3_OK: break; case HBG_L3_WRONG_HEAD:
priv->stats.rx_desc_l3_wrong_head_cnt++; returnfalse; case HBG_L3_CSUM_ERR:
skb->ip_summed = CHECKSUM_NONE;
priv->stats.rx_desc_l3_csum_err_cnt++;
/* Don't drop packets on csum validation failure, * suggest by Jakub
*/ break; case HBG_L3_LEN_ERR:
priv->stats.rx_desc_l3_len_err_cnt++; returnfalse; case HBG_L3_ZERO_TTL:
priv->stats.rx_desc_l3_zero_ttl_cnt++; returnfalse; default:
priv->stats.rx_desc_l3_other_cnt++; returnfalse;
}
switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) { case HBG_L4_OK: break; case HBG_L4_WRONG_HEAD:
priv->stats.rx_desc_l4_wrong_head_cnt++; returnfalse; case HBG_L4_LEN_ERR:
priv->stats.rx_desc_l4_len_err_cnt++; returnfalse; case HBG_L4_CSUM_ERR:
skb->ip_summed = CHECKSUM_NONE;
priv->stats.rx_desc_l4_csum_err_cnt++;
/* Don't drop packets on csum validation failure, * suggest by Jakub
*/ break; case HBG_L4_ZERO_PORT_NUM:
priv->stats.rx_desc_l4_zero_port_num_cnt++; returnfalse; default:
priv->stats.rx_desc_l4_other_cnt++; returnfalse;
}
if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
priv->stats.rx_desc_ip_ver_err_cnt++; return;
}
/* 0:ipv4, 1:ipv6 */ if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
priv->stats.rx_desc_ipv6_pkt_cnt++; else
priv->stats.rx_desc_ipv4_pkt_cnt++;
switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) { case HBG_IP_PKT:
priv->stats.rx_desc_ip_pkt_cnt++; if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
priv->stats.rx_desc_ip_opt_pkt_cnt++; if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
priv->stats.rx_desc_frag_cnt++;
if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
priv->stats.rx_desc_icmp_pkt_cnt++; elseif (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
priv->stats.rx_desc_ipsec_pkt_cnt++; break; case HBG_TCP_PKT:
priv->stats.rx_desc_tcp_pkt_cnt++; break; case HBG_UDP_PKT:
priv->stats.rx_desc_udp_pkt_cnt++; break; default:
priv->stats.rx_desc_no_ip_pkt_cnt++; break;
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.