/* Parse the transmit status from a write-back Tx descriptor.
 *
 * @x:      extra-statistics counters updated on error conditions
 * @p:      write-back descriptor to parse
 * @ioaddr: device I/O base (unused here; kept for the callback signature)
 *
 * Return: tx_dma_own while the DMA still owns the descriptor, tx_not_ls
 * when this is not the last segment, otherwise tx_done or tx_err
 * (possibly OR-ed with tx_err_bump_tc on underflow).
 */
static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
				       struct dma_desc *p,
				       void __iomem *ioaddr)
{
	unsigned int tdes3;
	int ret = tx_done;

	tdes3 = le32_to_cpu(p->des3);

	/* Get tx owner first */
	if (unlikely(tdes3 & TDES3_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
		return tx_not_ls;

	if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
		ret = tx_err;

		if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
			x->tx_jabber++;
		if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
			x->tx_frame_flushed++;
		if (unlikely(tdes3 & TDES3_LOSS_CARRIER))
			x->tx_losscarrier++;
		if (unlikely(tdes3 & TDES3_NO_CARRIER))
			x->tx_carrier++;
		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
			x->tx_collision +=
				(tdes3 & TDES3_COLLISION_COUNT_MASK)
				>> TDES3_COLLISION_COUNT_SHIFT;
		if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;
		if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) {
			x->tx_underflow++;
			/* Underflow: let the caller know it may want to
			 * bump the Tx threshold/traffic class.
			 */
			ret |= tx_err_bump_tc;
		}
		if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
			x->tx_ip_header_error++;
		if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
			x->tx_payload_error++;
	}

	if (unlikely(tdes3 & TDES3_DEFERRED))
		x->tx_deferred++;

	return ret;
}
staticint dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x, struct dma_desc *p)
{ unsignedint rdes1 = le32_to_cpu(p->des1); unsignedint rdes2 = le32_to_cpu(p->des2); unsignedint rdes3 = le32_to_cpu(p->des3); int message_type; int ret = good_frame;
if (unlikely(rdes3 & RDES3_OWN)) return dma_own;
if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR)) return discard_frame; if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR))) return rx_not_ls;
if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) { if (unlikely(rdes3 & RDES3_GIANT_PACKET))
x->rx_length++; if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
x->rx_gmac_overflow++;
if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
x->rx_watchdog++;
if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
x->rx_mii++;
if (unlikely(rdes3 & RDES3_CRC_ERROR))
x->rx_crc_errors++;
if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
x->dribbling_bit++;
if (rdes1 & RDES1_IP_HDR_ERROR)
x->ip_hdr_err++; if (rdes1 & RDES1_IP_CSUM_BYPASSED)
x->ip_csum_bypassed++; if (rdes1 & RDES1_IPV4_HEADER)
x->ipv4_pkt_rcvd++; if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++; if (rdes1 & RDES1_IP_PAYLOAD_ERROR)
x->ip_payload_err++;
if (rdes1 & RDES1_PTP_PACKET_TYPE)
x->ptp_frame_type++; if (rdes1 & RDES1_PTP_VER)
x->ptp_ver++; if (rdes1 & RDES1_TIMESTAMP_DROPPED)
x->timestamp_dropped++;
if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
x->sa_rx_filter_fail++;
ret = discard_frame;
} if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
x->da_rx_filter_fail++;
ret = discard_frame;
}
if (rdes2 & RDES2_L3_FILTER_MATCH)
x->l3_filter_match++; if (rdes2 & RDES2_L4_FILTER_MATCH)
x->l4_filter_match++; if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
>> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
x->l3_l4_filter_no_match++;
staticint dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{ /* Context type from W/B descriptor must be zero */ if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) return 0;
/* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) return 1;
if (likely(!own && ctxt)) { if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) /* Corrupted value */
ret = -EINVAL; else /* A valid Timestamp is ready to be read */
ret = 0;
}
/* Timestamp not ready */ return ret;
}
/* Check whether a valid Rx timestamp is available for this frame.
 *
 * @desc:      current write-back descriptor
 * @next_desc: following (context) descriptor holding the timestamp
 * @ats:       advanced timestamp flag (unused on dwmac4 write-back path)
 *
 * Polls the context descriptor up to 10 times for the timestamp to become
 * ready. Return: 1 when a valid timestamp can be read, 0 otherwise.
 */
static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
						 u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	int ret = -EINVAL;

	/* Get the status from normal w/b descriptor */
	if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) {
		if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
			int i = 0;

			/* Check if timestamp is OK from context descriptor */
			do {
				ret = dwmac4_rx_check_timestamp(next_desc);
				if (ret < 0)
					goto exit;
				i++;
			} while ((ret == 1) && (i < 10));

			if (i == 10)
				ret = -EBUSY;
		}
	}
exit:
	if (likely(ret == 0))
		return 1;

	return 0;
}
/* Initialize an Rx descriptor (ring mode): hand ownership to the DMA and
 * program the interrupt-on-completion bit via dwmac4_set_rx_owner().
 * The mode/end/bfsize parameters are unused here but kept for the common
 * descriptor-ops callback signature.
 */
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				   int mode, int end, int bfsize)
{
	dwmac4_set_rx_owner(p, disable_rx_ic);
}
/* NOTE(review): the lines below are the orphaned tail of a Tx-descriptor
 * preparation function whose head (signature and first half of the body)
 * is missing from this copy of the file — presumably
 * dwmac4_rd_prepare_tx_desc(); restore from a pristine source. Left
 * byte-identical because the enclosing interface cannot be inferred here.
 */
/* Set/clear the last-segment flag for this descriptor. */
if (ls)
tdes3 |= TDES3_LAST_DESCRIPTOR; else
tdes3 &= ~TDES3_LAST_DESCRIPTOR;
/* Finally set the OWN bit. Later the DMA will start! */ if (tx_own)
tdes3 |= TDES3_OWN;
if (is_fs && tx_own) /* When the own bit, for the first frame, has to be set, all * descriptors for the same frame has to be set before, to * avoid race condition.
*/
dma_wmb();
/* Publish the assembled flags word back to the descriptor. */
p->des3 = cpu_to_le32(tdes3);
}
/* Prepare a Tx descriptor for a TSO segment.
 *
 * @p:            descriptor to fill
 * @is_fs:        true for the first segment of the frame
 * @len1:         buffer 1 length
 * @len2:         buffer 2 length
 * @tx_own:       hand ownership to the DMA when done
 * @ls:           true for the last segment
 * @tcphdrlen:    TCP header length (first segment only)
 * @tcppayloadlen: TCP payload length (first segment only)
 *
 * NOTE(review): the source being repaired was truncated after dma_wmb();
 * the first-segment TSO-enable branch (which is what consumes tcphdrlen/
 * tcppayloadlen — otherwise unused) and the final des3 write-back are
 * reconstructed from the upstream driver — confirm against a pristine copy.
 */
static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
					  int len1, int len2, bool tx_own,
					  bool ls, unsigned int tcphdrlen,
					  unsigned int tcppayloadlen)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	if (len1)
		p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));

	if (len2)
		p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
			    & TDES2_BUFFER2_SIZE_MASK);

	if (is_fs) {
		tdes3 |= TDES3_FIRST_DESCRIPTOR |
			 TDES3_TCP_SEGMENTATION_ENABLE |
			 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
			  TDES3_SLOT_NUMBER_MASK) |
			 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
	} else {
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
	}

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame has to be set before, to
		 * avoid race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}
/*
 * NOTE(review): the following is non-code residue — a German website
 * disclaimer pasted into this source file, most likely by the tool that
 * produced this copy. Kept here as a comment (translated to English) so
 * the file remains compilable; it has no relation to the driver code:
 * "The information on this website was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Remark: the colored
 * syntax highlighting and the measurement are still experimental."
 */