/* xc->vc.lock must be held by caller */ staticstruct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{ struct virt_dma_desc *vd;
vd = vchan_next_desc(&xc->vc); if (!vd) return NULL;
list_del(&vd->node);
return to_uniphier_xdmac_desc(vd);
}
/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
				      struct uniphier_xdmac_desc *xd)
{
	u32 src_mode, src_width;
	u32 dst_mode, dst_width;
	dma_addr_t src_addr, dst_addr;
	u32 val, its, tnum;
	enum dma_slave_buswidth buswidth;

	/*
	 * The width of MEM side must be 4 or 8 bytes, that does not
	 * affect that of DEV side and transfer size.
	 */
	if (xd->dir == DMA_DEV_TO_MEM) {
		/* DEV-to-MEM: source is a device FIFO, keep the address fixed */
		src_mode = XDMAC_SADM_SAM_FIXED;
		buswidth = xc->sconfig.src_addr_width;
	} else {
		/* MEM-to-DEV: source is memory, increment the address */
		src_mode = XDMAC_SADM_SAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

	/*
	 * NOTE(review): src_mode/src_width (and the dst_*, *_addr, its,
	 * tnum locals) are computed or declared but never written to any
	 * register in this view -- the per-transfer register setup appears
	 * to be missing here.  Restore it from the full source before use.
	 */

	/* start XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val |= XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);
}
/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
	u32 val;

	/* disable END/ERR interrupts, preserving any other IEN bits */
	val = readl(xc->reg_ch_base + XDMAC_IEN);
	val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
	writel(val, xc->reg_ch_base + XDMAC_IEN);

	/*
	 * Stop XDMAC.  The original code computed the masked value and then
	 * wrote literal 0, discarding every other TSS bit; write the
	 * read-modify-write result so only XDMAC_TSS_REQ is cleared.
	 */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val &= ~XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);

	/* wait until the transfer-enable flag drops (poll every 100us, 1ms max) */
	return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
					 !(val & XDMAC_STAT_TENF), 100, 1000);
}
/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
	struct uniphier_xdmac_desc *xd;

	/* Pop the next queued descriptor and kick the hardware if one exists */
	xd = uniphier_xdmac_next_desc(xc);
	if (xd)
		uniphier_xdmac_chan_start(xc, xd);

	/* Record the in-flight descriptor on the channel (NULL when idle) */
	xc->xd = xd;
}
/*
 * NOTE(review): this span appears to be a corrupted merge of two different
 * functions.  It opens as the per-channel interrupt handler, but from the
 * "maxburst" check onward the body belongs to a prep_slave_sg-style routine
 * (references to maxburst, sg, xd->nodes[i], kfree(xd), and "return NULL"
 * inside a void function).  The text between the two halves is missing and
 * the spin_lock taken below is never visibly released in this view --
 * restore both functions from the full source before building.
 */
staticvoid uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
u32 stat; int ret;
spin_lock(&xc->vc.lock);
/* read the channel interrupt/status register */
stat = readl(xc->reg_ch_base + XDMAC_ID);
if (stat & XDMAC_ID_ERRIDF) {
/* on a transfer error, try to stop the channel; report if the abort itself failed */
ret = uniphier_xdmac_chan_stop(xc); if (ret)
dev_err(xc->xdev->ddev.dev, "DMA transfer error with aborting issue\n"); else
dev_err(xc->xdev->ddev.dev, "DMA transfer error\n");
/* --- from here on the text belongs to a different (prep) function --- */
if (!maxburst)
maxburst = 1; if (maxburst > xc->xdev->ddev.max_burst) {
dev_err(xc->xdev->ddev.dev, "Exceed maximum number of burst words\n"); return NULL;
}
/* * Currently transfer that size doesn't align the unit size * (the number of burst words * bus-width) is not allowed, * because the driver does not support the way to transfer * residue size. As a matter of fact, in order to transfer * arbitrary size, 'src_maxburst' or 'dst_maxburst' of * dma_slave_config must be 1.
 */ if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
dev_err(xc->xdev->ddev.dev, "Unaligned transfer size: %d", sg_dma_len(sg));
kfree(xd); return NULL;
}
if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
dev_err(xc->xdev->ddev.dev, "Exceed maximum transfer size");
kfree(xd); return NULL;
}
}
/* * Before reaching here, almost all descriptors have been freed by the * ->device_free_chan_resources() hook. However, each channel might * be still holding one descriptor that was on-flight at that moment. * Terminate it to make sure this hardware is no longer running. Then, * free the channel resources once again to avoid memory leak.
*/
list_for_each_entry(chan, &ddev->channels, device_node) {
ret = dmaengine_terminate_sync(chan); if (ret) { /* * This results in resource leakage and maybe also * use-after-free errors as e.g. *xdev is kfreed.
*/
dev_alert(&pdev->dev, "Failed to terminate channel %d (%pe)\n",
chan->chan_id, ERR_PTR(ret)); return;
}
uniphier_xdmac_free_chan_resources(chan);
}
/*
 * NOTE(review): trailing website-disclaimer text (German), not C code --
 * preserved as a comment so the file remains parseable:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */