/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 */
/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
/*
 * NOTE(review): truncated fragment — the opening of the enclosing
 * function is not visible in this chunk, so only the visible lines are
 * annotated.  The assignment walks the software-LLP cursor
 * (tx_node_active) to the next node in the descriptor list; presumably
 * this runs from block-completion handling when the channel has no
 * hardware linked-list support — confirm against the full file.
 */
/* * REVISIT: We should attempt to chain as many descriptors as * possible, perhaps even appending to those already submitted * for DMA. But this is hard to do in a race-free manner.
 */
/* Move pointer to next descriptor */
dwc->tx_node_active = dwc->tx_node_active->next;
}
/*
 * NOTE(review): extraction-garbled fragment.  "staticvoid" and
 * "unsignedlong" are fused tokens ("static void", "unsigned long") —
 * lost whitespace; this will not compile as-is.  The function also
 * appears cut short: from the spin_lock_irqsave() line onward the text
 * belongs to a different routine (a descriptor-completion scan), not
 * to dwc_dostart().  Restore from the upstream file before building.
 */
/* Called with dwc->lock held and bh disabled */ staticvoid dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
u8 lms = DWC_LLP_LMS(dwc->dws.m_master); unsignedlong was_soft_llp;
/* Hardware must be idle (CH_EN clear) before programming a transfer. */
/* ASSERT: channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan), "%s: BUG: Attempted to start non-idle channel\n",
__func__);
dwc_dump_chan_regs(dwc);
/* The tasklet will hopefully advance the queue... */ return;
}
/* Channel lacks hardware LLP: drive the chain block-by-block in software. */
if (dwc->nollp) {
was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
&dwc->flags); if (was_soft_llp) {
dev_err(chan2dev(&dwc->chan), "BUG: Attempted to start new LLP transfer inside ongoing one\n"); return;
}
/*
 * NOTE(review): 'flags' below is never declared in the visible text —
 * further evidence of the splice described above.
 */
spin_lock_irqsave(&dwc->lock, flags); if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan), "BUG: XFER bit set, but channel not idle!\n");
/* Try to continue after resetting the channel... */
dwc_chan_disable(dw, dwc);
}
/*
 * NOTE(review): fragment of a descriptor-completion scan whose opening
 * (and the declarations of 'list', 'status_xfer', 'desc', 'child',
 * 'llp', 'flags') is not visible here.  Walks the active descriptor
 * chain, decrementing 'residue' for every block already sent, until it
 * finds the block the hardware is currently working on.
 */
/* * Submit queued descriptors ASAP, i.e. before we go through * the completed ones.
 */
list_splice_init(&dwc->active_list, &list);
dwc_dostart_first_queued(dwc);
if (status_xfer & dwc->mask) { /* Everything we've submitted is done */
dma_writel(dw, CLEAR.XFER, dwc->mask);
if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { struct list_head *head, *active = dwc->tx_node_active;
/* * We are inside first active descriptor. * Otherwise something is really wrong.
 */
desc = dwc_first_active(dwc);
head = &desc->tx_list; if (active != head) { /* Update residue to reflect last sent descriptor */ if (active == head->next)
desc->residue -= desc->len; else
desc->residue -= to_dw_desc(active->prev)->len;
child = to_dw_desc(active);
/* Submit next block */
dwc_do_single_block(dwc, child);
/* Hardware is still fetching the first descriptor: nothing consumed yet. */
/* Check first descriptors addr */ if (desc->txd.phys == DWC_LLP_LOC(llp)) {
spin_unlock_irqrestore(&dwc->lock, flags); return;
}
/* Check first descriptors llp */ if (lli_read(desc, llp) == llp) { /* This one is currently in progress */
desc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags); return;
}
desc->residue -= desc->len;
/* Scan the child LLI chain for the in-progress block. */
list_for_each_entry(child, &desc->tx_list, desc_node) { if (lli_read(child, llp) == llp) { /* Currently in progress */
desc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags); return;
}
desc->residue -= child->len;
}
/* * No descriptors so far seem to be in progress, i.e. * this one must be done.
 */
spin_unlock_irqrestore(&dwc->lock, flags);
dwc_descriptor_complete(dwc, desc, true);
spin_lock_irqsave(&dwc->lock, flags);
}
dev_err(chan2dev(&dwc->chan), "BUG: All descriptors done, but channel not idle!\n");
/* Try to continue after resetting the channel... */
dwc_chan_disable(dw, dwc);
/*
 * NOTE(review): error-path fragment; the enclosing function's opening
 * and the declarations of 'bad_desc' and 'child' are not visible.
 * Removes the failing descriptor from the active list, restarts the
 * channel with the next one (if any), then dumps the bad descriptor
 * and its children for diagnosis.
 */
/* * The descriptor currently at the head of the active list is * borked. Since we don't have any way to report errors, we'll * just have to scream loudly and try to carry on.
 */
bad_desc = dwc_first_active(dwc);
list_del_init(&bad_desc->desc_node);
list_move(dwc->queue.next, dwc->active_list.prev);
/* Clear the error flag and try to restart the controller */
dma_writel(dw, CLEAR.ERROR, dwc->mask); if (!list_empty(&dwc->active_list))
dwc_dostart(dwc, dwc_first_active(dwc));
/* * WARN may seem harsh, but since this only happens * when someone submits a bad physical address in a * descriptor, we should consider ourselves lucky that the * controller flagged an error instead of scribbling over * random memory locations.
 */
dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n" " cookie: %d\n", bad_desc->txd.cookie);
dwc_dump_lli(dwc, bad_desc);
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
dwc_dump_lli(dwc, child);
/*
 * NOTE(review): IRQ-handler fragment (returns IRQ_NONE; the handler's
 * signature and the declaration of 'status' are not visible).  Bails
 * out for a shared IRQ that is not ours, then masks all channel
 * interrupts so the deferred (tasklet) half can re-enable them.
 */
/* Check if we have any interrupt from the DMAC which is not in use */ if (!dw->in_use) return IRQ_NONE;
status = dma_readl(dw, STATUS_INT);
dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
/* Check if we have any interrupt from the DMAC */ if (!status) return IRQ_NONE;
/* * Just disable the interrupts. We'll turn them back on in the * softirq handler.
 */
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
/* Masking should have silenced everything; anything left pending is a bug. */
status = dma_readl(dw, STATUS_INT); if (status) {
dev_err(dw->dma.dev, "BUG: Unexpected interrupts pending: 0x%x\n",
status);
/*
 * NOTE(review): this span splices at least three different routines —
 * a slave-config bus-width check (returns -EINVAL), a channel
 * allocation path (dma_cookie_init, filter check, controller enable),
 * and a capabilities query (max_sg_burst).  'reg_width', 'mem_width',
 * 'chan' and 'caps' are never declared in the visible text.
 */
/* * It's possible to have a data portion locked in the DMA FIFO in case * of the channel suspension. Subsequent channel disabling will cause * that data silent loss. In order to prevent that maintain the src and * dst transfer widths coherency by means of the relation: * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH) * Look for the details in the commit message that brings this change. * * Note the DMA configs utilized in the calculations below must have * been verified to have correct values by this method call.
 */ if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
reg_width = dwc->dma_sconfig.dst_addr_width; if (mem_width < reg_width) return -EINVAL;
/* ASSERT: channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); return -EIO;
}
dma_cookie_init(chan);
/* * NOTE: some controllers may have additional features that we * need to initialize here, like "scatter-gather" (which * doesn't mean what you think it means), and status writeback.
 */
/* * We need controller-specific data to set up slave transfers.
 */ if (chan->private && !dw_dma_filter(chan, chan->private)) {
dev_warn(chan2dev(chan), "Wrong controller-specific data\n"); return -EINVAL;
}
/* First claimed channel powers the controller on. */
/* Enable controller here if needed */ if (!dw->in_use)
do_dw_dma_on(dw);
dw->in_use |= dwc->mask;
/* * It might be crucial for some devices to have the hardware * accelerated multi-block transfers supported, aka LLPs in DW DMAC * notation. So if LLPs are supported then max_sg_burst is set to * zero which means unlimited number of SG entries can be handled in a * single DMA transaction, otherwise it's just one SG entry.
 */ if (dwc->nollp)
caps->max_sg_burst = 1; else
caps->max_sg_burst = 0;
}
/*
 * NOTE(review): probe-path fragment; the function's opening, its
 * locals ('ret', 'i', 'autocfg', 'pdata') and the err_* labels are not
 * visible.  "elseif" (below) and "unsignedint" are fused tokens from
 * the extraction ("else if", "unsigned int") and will not compile
 * as-is.  Validates/copies platform data, allocates channels, resets
 * the controller, wires the IRQ/tasklet, decodes per-channel hardware
 * parameters, then registers with the dmaengine core.
 */
/* Fill platform data with the default values */
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
} elseif (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
ret = -EINVAL; goto err_pdata;
} else {
memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
/* Reassign the platform data pointer */
pdata = dw->pdata;
}
dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
GFP_KERNEL); if (!dw->chan) {
ret = -ENOMEM; goto err_pdata;
}
/* Calculate all channel mask before DMA setup */
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
/* Force dma off, just in case */
dw->disable(dw);
/* Device and instance ID for IRQ and DMA pool */
dw->set_device_name(dw, chip->id);
/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create(dw->name, chip->dev, sizeof(struct dw_desc), 4, 0); if (!dw->desc_pool) {
dev_err(chip->dev, "No memory for descriptors dma pool\n");
ret = -ENOMEM; goto err_pdata;
}
tasklet_setup(&dw->tasklet, dw_dma_tasklet);
ret = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
dw->name, dw); if (ret) goto err_pdata;
/* Per-channel setup: read back hardware parameters when autoconfig is on. */
INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < pdata->nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i];
/* Hardware configuration */ if (autocfg) { unsignedint r = DW_DMA_MAX_NR_CHANNELS - i - 1; void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r]; unsignedint dwc_params = readl(addr);
dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
dwc_params);
/* * Decode maximum block size for given channel. The * stored 4 bit value represents blocks from 0x00 for 3 * up to 0x0a for 4095.
 */
dwc->block_size =
(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
/* * According to the DW DMA databook the true scatter- * gether LLPs aren't available if either multi-block * config is disabled (CHx_MULTI_BLK_EN == 0) or the * LLP register is hard-coded to zeros * (CHx_HC_LLP == 1).
 */
dwc->nollp =
(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 ||
(dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1;
dwc->max_burst =
(0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7));
} else {
/* No autoconfig: take the limits straight from platform data. */
dwc->block_size = pdata->block_size;
dwc->nollp = !pdata->multi_block[i];
dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST;
}
}
/* Clear all interrupts on all channels. */
dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
/* Set capabilities */
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
/* * For now there is no hardware with non uniform maximum block size * across all of the device channels, so we set the maximum segment * size as the block size found for the very first channel.
 */
dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);
ret = dma_async_device_register(&dw->dma); if (ret) goto err_dma_register;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.