/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver * * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com) * * This program is dual-licensed; you may select either version 2 of * the GNU General Public License ("GPL") or BSD license ("BSD"). * * This Synopsys DWC XLGMAC software driver and associated documentation * (hereinafter the "Software") is an unsupported proprietary work of * Synopsys, Inc. unless otherwise expressly agreed to in writing between * Synopsys and you. The Software IS NOT an item of Licensed Software or a * Licensed Product under any End User Software License Agreement or * Agreement for Licensed Products with Synopsys or any supplement thereto. * Synopsys is a registered trademark of Synopsys, Inc. Other names included * in the SOFTWARE may be the trademarks of their respective owners.
*/
/* NOTE(review): fragment — the enclosing Tx-path function definition
 * (presumably xlgmac_maybe_stop_tx_queue; verify against full file) is
 * not visible in this chunk.
 */
/* Not enough free descriptors for this packet: stop the subqueue so the
 * stack stops handing us skbs until Tx cleanup frees descriptors.
 */
if (count > xlgmac_tx_avail_desc(ring)) {
netif_info(pdata, drv, pdata->netdev, "Tx queue stopped, not enough descriptors available\n");
netif_stop_subqueue(pdata->netdev, channel->queue_index);
ring->tx.queue_stopped = 1;
/* If we haven't notified the hardware because of xmit_more * support, tell it now
 */ if (ring->tx.xmit_more)
/* Kick the DMA for descriptors deferred by xmit_more batching. */
pdata->hw_ops.tx_start_xmit(channel, ring);
/* NOTE(review): fragment — the enclosing Tx packet-preparation function
 * (descriptor counting for TSO/VLAN) is not fully visible in this chunk.
 */
/* Update the number of packets that will ultimately be transmitted * along with the extra bytes for each extra packet
 */
pkt_info->tx_packets = skb_shinfo(skb)->gso_segs;
/* Each additional GSO segment repeats the protocol headers. */
pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len;
if (xlgmac_is_tso(skb)) { /* TSO requires an extra descriptor if mss is different */ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
/* Context descriptor needed to program the new MSS. */
context_desc = 1;
pkt_info->desc_count++;
}
/* TSO requires an extra descriptor for TSO header */
pkt_info->desc_count++;
if (skb_vlan_tag_present(skb)) { /* VLAN requires an extra descriptor if tag is different */ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) /* We can share with the TSO context descriptor */ if (!context_desc) {
/* Only count one context descriptor for both MSS and VLAN tag. */
context_desc = 1;
pkt_info->desc_count++;
}
/* NOTE(review): fragment — interior of the main interrupt service routine
 * (per-device ISR); its signature and local declarations are not visible
 * in this chunk.
 */
/* The DMA interrupt status register also reports MAC and MTL * interrupts. So for polling mode, we just need to check for * this register to be non-zero
 */
dma_isr = readl(pdata->mac_regs + DMA_ISR); if (!dma_isr) return IRQ_HANDLED;
/* Walk only the channels whose bit is set in the summary register. */
for (i = 0; i < pdata->channel_count; i++) { if (!(dma_isr & (1 << i))) continue;
channel = pdata->channel_head + i;
dma_ch_isr = readl(XLGMAC_DMA_REG(channel, DMA_CH_SR));
netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
i, dma_ch_isr);
/* The TI or RI interrupt bits may still be set even if using * per channel DMA interrupts. Check to be sure those are not * enabled before using the private data napi structure.
 */
ti = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS,
DMA_CH_SR_TI_LEN);
ri = XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS,
DMA_CH_SR_RI_LEN); if (!pdata->per_channel_irq && (ti || ri)) { if (napi_schedule_prep(&pdata->napi)) { /* Disable Tx and Rx interrupts */
xlgmac_disable_rx_tx_ints(pdata);
pdata->stats.napi_poll_isr++; /* Turn on polling */
__napi_schedule_irqoff(&pdata->napi);
}
}
/* Bump error/diagnostic counters for the remaining status bits. */
if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS,
DMA_CH_SR_TPS_LEN))
pdata->stats.tx_process_stopped++;
if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS,
DMA_CH_SR_RPS_LEN))
pdata->stats.rx_process_stopped++;
if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS,
DMA_CH_SR_TBU_LEN))
pdata->stats.tx_buffer_unavailable++;
if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS,
DMA_CH_SR_RBU_LEN))
pdata->stats.rx_buffer_unavailable++;
/* Restart the device on a Fatal Bus Error */ if (XLGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS,
DMA_CH_SR_FBE_LEN)) {
pdata->stats.fatal_bus_error++;
/* Defer the restart to process context; cannot do it in the ISR. */
schedule_work(&pdata->restart_work);
}
/* Clear all interrupt signals */
/* DMA_CH_SR bits are write-1-to-clear: write back what we read. */
writel(dma_ch_isr, XLGMAC_DMA_REG(channel, DMA_CH_SR));
}
/* MAC-level interrupt: dispatch MMC Tx/Rx statistics interrupts. */
if (XLGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS,
DMA_ISR_MACIS_LEN)) {
mac_isr = readl(pdata->mac_regs + MAC_ISR);
if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS,
MAC_ISR_MMCTXIS_LEN))
hw_ops->tx_mmc_int(pdata);
if (XLGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS,
MAC_ISR_MMCRXIS_LEN))
hw_ops->rx_mmc_int(pdata);
}
/* NOTE(review): fragment — interior of the per-channel DMA interrupt
 * handler; the enclosing function definition is not visible here.
 */
/* Per channel DMA interrupts are enabled, so we use the per * channel napi structure and not the private data napi structure
 */ if (napi_schedule_prep(&channel->napi)) { /* Disable Tx and Rx interrupts */
/* Mask only this channel's IRQ line while its NAPI runs. */
disable_irq_nosync(channel->dma_irq);
/* Turn on polling */
__napi_schedule_irqoff(&channel->napi);
}
/* NOTE(review): fragment — interior of the Tx timer callback; the
 * enclosing function definition is not visible in this chunk.
 */
/* Pick the per-channel napi when per-channel IRQs are in use,
 * otherwise the shared device-level napi.
 */
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
if (napi_schedule_prep(napi)) { /* Disable Tx and Rx interrupts */ if (pdata->per_channel_irq)
disable_irq_nosync(channel->dma_irq); else
xlgmac_disable_rx_tx_ints(pdata);
pdata->stats.napi_poll_txtimer++; /* Turn on polling */
__napi_schedule(napi);
}
/* NOTE(review): fragment — interior of the NAPI-enable helper; the
 * enclosing function definition and its closing braces are not visible.
 */
if (pdata->per_channel_irq) {
/* One napi context per channel: register (if 'add') and enable each. */
channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (add)
netif_napi_add(pdata->netdev, &channel->napi,
xlgmac_one_poll);
napi_enable(&channel->napi);
}
} else { if (add)
/* Single shared napi context for all channels. */
netif_napi_add(pdata->netdev, &pdata->napi,
xlgmac_all_poll);
/* NOTE(review): the lines below belong to a DIFFERENT function's IRQ
 * request error path (extraction collage); 'i' is unsigned so the loop
 * terminates when i wraps past zero.
 */
err_irq: /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ for (i--, channel--; i < pdata->channel_count; i--, channel--)
devm_free_irq(pdata->dev, channel->dma_irq, channel);
/* NOTE(review): 'staticvoid' is almost certainly an extraction artifact
 * for 'static void' — confirm against the original file.  The function
 * body is truncated here; only the not-running guard is visible.
 */
staticvoid xlgmac_restart_dev(struct xlgmac_pdata *pdata)
{ /* If not running, "restart" will happen on open */ if (!netif_running(pdata->netdev)) return;
/* NOTE(review): fragment — interior of the Rx skb-construction helper;
 * the enclosing function definition is not visible in this chunk.
 */
skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len); if (!skb) return NULL;
/* Start with the header buffer which may contain just the header * or the header plus data
 */
/* Make the DMA'd header bytes visible to the CPU before copying. */
dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base,
desc_data->rx.hdr.dma_off,
desc_data->rx.hdr.dma_len,
DMA_FROM_DEVICE);
len -= copy_len; if (len) { /* Add the remaining data as a frag */
dma_sync_single_range_for_cpu(pdata->dev,
desc_data->rx.buf.dma_base,
desc_data->rx.buf.dma_off,
desc_data->rx.buf.dma_len,
DMA_FROM_DEVICE);
/* NOTE(review): fragment — interior of the Rx NAPI poll routine; the
 * signature, the bulk of the descriptor-processing loop ('read_again',
 * 'next_packet' labels), and several locals are not visible here.
 */
/* Nothing to do if there isn't a Rx ring for this channel */ if (!ring) return 0;
incomplete = 0;
context_next = 0;
/* Per-channel napi when per-channel IRQs are enabled, else shared. */
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
pkt_info = &ring->pkt_info; while (packet_count < budget) { /* First time in loop see if we need to restore state */ if (!received && desc_data->state_saved) {
/* Resume a packet left half-assembled by the previous poll. */
skb = desc_data->state.skb;
error = desc_data->state.error;
len = desc_data->state.len;
} else {
memset(pkt_info, 0, sizeof(*pkt_info));
skb = NULL;
error = 0;
len = 0;
}
/* Earlier error, just drain the remaining data */ if ((incomplete || context_next) && error) goto read_again;
if (error || pkt_info->errors) { if (pkt_info->errors)
netif_err(pdata, rx_err, netdev, "error in received packet\n");
dev_kfree_skb(skb); goto next_packet;
}
if (!context) { /* Length is cumulative, get this descriptor's length */
dma_desc_len = desc_data->rx.len - len;
len += dma_desc_len;
/* Check if we need to save state before leaving */ if (received && (incomplete || context_next)) {
/* Budget exhausted mid-packet: stash progress for the next poll. */
desc_data = XLGMAC_GET_DESC_DATA(ring, ring->cur);
desc_data->state_saved = 1;
desc_data->state.skb = skb;
desc_data->state.len = len;
desc_data->state.error = error;
}
XLGMAC_PR("packet_count = %d\n", packet_count);
return packet_count;
}
/* xlgmac_one_poll - NAPI poll routine used with per-channel DMA interrupts.
 * @napi: per-channel napi_struct embedded in struct xlgmac_channel
 * @budget: maximum number of Rx packets to process in this poll
 *
 * Cleans the channel's Tx ring, then processes up to @budget Rx packets.
 * If the full budget is not consumed, polling is completed and the
 * channel's DMA IRQ (masked with disable_irq_nosync() in the ISR) is
 * re-enabled.
 *
 * Return: number of Rx packets processed.
 */
static int xlgmac_one_poll(struct napi_struct *napi, int budget)
{
	struct xlgmac_channel *channel = container_of(napi, struct xlgmac_channel,
						      napi);
	int processed = 0;

	XLGMAC_PR("budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xlgmac_tx_poll(channel);

	/* Process Rx ring next */
	processed = xlgmac_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		enable_irq(channel->dma_irq);
	}

	XLGMAC_PR("received = %d\n", processed);

	return processed;
}
/* NOTE(review): truncated — only the opening declarations of this shared
 * NAPI poll routine survive; the body is cut off by trailing non-code
 * text.  'staticint' and 'unsignedint' look like extraction artifacts
 * for 'static int' / 'unsigned int' — confirm against the original file.
 */
staticint xlgmac_all_poll(struct napi_struct *napi, int budget)
{ struct xlgmac_pdata *pdata = container_of(napi, struct xlgmac_pdata,
napi); struct xlgmac_channel *channel; int processed, last_processed; int ring_budget; unsignedint i;
/*
 * NOTE(review): trailing extraction artifact — a German website disclaimer
 * unrelated to this driver, preserved here (translated) and wrapped in a
 * comment so it does not break compilation:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */