/* Point the channel's LLP register at the first DCB of the chain and kick
 * the channel so the FDMA starts fetching descriptors from @dma.
 *
 * @ocelot: switch private data
 * @dma:    DMA address of the first DCB to process
 * @chan:   FDMA channel number (XTR or INJ)
 */
static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
				      int chan)
{
	ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma);
	/* Barrier to force memory writes to DCB to be completed before
	 * starting the channel.
	 */
	wmb();
	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
}
idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE); /* Chain the DCB to the next one */
dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);
}
/* Check if the FDMA hits the DCB with LLP == NULL */
llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN)); if (unlikely(llp)) returnfalse;
rx_ring = &fdma->rx_ring;
ret = ocelot_fdma_wait_chan_safe(ocelot, chan); if (ret) {
dev_err_ratelimited(ocelot->dev, "Unable to stop RX channel\n"); return;
}
ocelot_fdma_rx_set_llp(rx_ring);
/* FDMA stopped on the last DCB that contained a NULL LLP, since * we processed some DCBs in RX, there is free space, and we must set * DCB_LLP to point to the next DCB
*/
llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan));
dma_base = rx_ring->dcbs_dma;
/* Get the next DMA addr located after LLP == NULL DCB */
idx = ocelot_fdma_dma_idx(dma_base, llp_prev);
idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
new_llp = ocelot_fdma_idx_dma(dma_base, idx);
/* Finally reactivate the channel */
ocelot_fdma_activate_chan(ocelot, new_llp, chan);
}
/* Sync for use by the device */
dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr,
old_rxb->page_offset,
OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE);
}
if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) { /* Reuse the free half of the page for the next_to_alloc DCB*/
ocelot_fdma_reuse_rx_page(ocelot, rxb);
} else { /* page cannot be reused, unmap it */
dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
DMA_FROM_DEVICE);
}
while (budget--) {
idx = rx_ring->next_to_clean;
dcb = &rx_ring->dcbs[idx];
stat = dcb->stat; if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0) break;
/* New packet is a start of frame but we already got a skb set, * we probably lost an EOF packet, free skb
*/ if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) {
dev_kfree_skb(skb);
skb = NULL;
}
rxb = &rx_ring->bufs[idx]; /* Fetch next to clean buffer from the rx_ring */
skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb); if (unlikely(!skb)) break;
if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT ||
stat & MSCC_FDMA_DCB_STAT_PD)) {
dev_err_ratelimited(ocelot->dev, "DCB aborted or pruned\n");
dev_kfree_skb(skb);
skb = NULL; continue;
}
/* We still need to process the other fragment of the packet * before delivering it to the network stack
*/ if (!(stat & MSCC_FDMA_DCB_STAT_EOF)) continue;
if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb)))
dev_kfree_skb(skb);
skb = NULL;
}
rx_ring->skb = skb;
if (cleaned_cnt)
ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt);
/* Purge the TX packets that have been sent up to the NULL llp or the * end of done list.
*/ while (!ocelot_fdma_tx_ring_empty(fdma)) {
ntc = tx_ring->next_to_clean;
dcb = &tx_ring->dcbs[ntc]; if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD)) break;
/* Only update after accessing all dcb fields */
tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc,
OCELOT_FDMA_TX_RING_SIZE);
/* If we hit the NULL LLP, stop, we might need to reload FDMA */ if (dcb_llp == 0) {
end_of_list = true; break;
}
}
/* No need to try to wake if there were no TX cleaned_cnt up. */ if (ocelot_fdma_tx_ring_free(fdma))
ocelot_fdma_wakeup_netdev(ocelot);
/* If there is still some DCBs to be processed by the FDMA or if the * pending list is empty, there is no need to restart the FDMA.
*/ if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma)) return;
ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN); if (ret) {
dev_warn(ocelot->dev, "Failed to wait for TX channel to stop\n"); return;
}
/* Set NULL LLP to be the last DCB used */
new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
OCELOT_FDMA_TX_RING_SIZE);
dcb = &tx_ring->dcbs[new_null_llp_idx];
dcb->llp = 0;
/* Free the pages held in the RX ring */ while (idx != rx_ring->next_to_use) {
rxb = &rx_ring->bufs[idx];
dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(rxb->page);
idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
}
if (fdma->rx_ring.skb)
dev_kfree_skb_any(fdma->rx_ring.skb);
}
/* Create a pool of consistent memory blocks for hardware descriptors */
fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev,
OCELOT_DCBS_HW_ALLOC_SIZE,
&fdma->dcbs_dma_base, GFP_KERNEL); if (!fdma->dcbs_base) return -ENOMEM;
/* DCBs must be aligned on a 32bit boundary */
dcbs = fdma->dcbs_base;
dcbs_dma = fdma->dcbs_dma_base; if (!IS_ALIGNED(dcbs_dma, 4)) {
adjust = dcbs_dma & 0x3;
dcbs_dma = ALIGN(dcbs_dma, 4);
dcbs = (void *)dcbs + adjust;
}
/* Set the last DCB LLP as NULL, this is normally done when restarting * the RX chan, but this is for the first run
*/
ocelot_fdma_rx_set_llp(&fdma->rx_ring);
/*
 * (Extraction artifact — scraped site disclaimer, deduplicated:)
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */