// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 */
/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
 * (tested using dmatest module) and data transfers between memory and
 * peripheral I/O memory by means of slave scatter/gather with these
 * limitations:
 *  - chunked transfers (described by s/g lists with more than one item) are
 *    refused as long as proper support for scatter/gather is missing
 *  - transfers on MPC8308 always start from software as this SoC does not have
 *    external request lines for peripheral flow control
 *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
 *    MPC512x), and 32 bytes are supported, and, consequently, source
 *    addresses and destination addresses must be aligned accordingly;
 *    furthermore, for MPC512x SoCs, the transfer size must be aligned on
 *    (chunk size * maxburst)
 */
/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64
/* Arbitration mode of group and channel */
/* Shifts are done on an unsigned constant: (1 << 31) would shift into the
 * sign bit of a signed int, which is undefined behavior in C.
 */
#define MPC_DMA_DMACR_EDCG	(1u << 31)
#define MPC_DMA_DMACR_ERGA	(1u << 3)
#define MPC_DMA_DMACR_ERCA	(1u << 2)
/* * Execute all queued DMA descriptors. * * Following requirements must be met while calling mpc_dma_execute(): * a) mchan->lock is acquired, * b) mchan->active list is empty, * c) mchan->queued list contains at least one entry.
*/ staticvoid mpc_dma_execute(struct mpc_dma_chan *mchan)
{ struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); struct mpc_dma_desc *first = NULL; struct mpc_dma_desc *prev = NULL; struct mpc_dma_desc *mdesc; int cid = mchan->chan.chan_id;
while (!list_empty(&mchan->queued)) {
mdesc = list_first_entry(&mchan->queued, struct mpc_dma_desc, node); /* * Grab either several mem-to-mem transfer descriptors * or one peripheral transfer descriptor, * don't mix mem-to-mem and peripheral transfer descriptors * within the same 'active' list.
*/ if (mdesc->will_access_peripheral) { if (list_empty(&mchan->active))
list_move_tail(&mdesc->node, &mchan->active); break;
} else {
list_move_tail(&mdesc->node, &mchan->active);
}
}
/* Chain descriptors into one transaction */
list_for_each_entry(mdesc, &mchan->active, node) { if (!first)
first = mdesc;
/* Send first descriptor in chain into hardware */
memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
if (first != prev)
mdma->tcd[cid].e_sg = 1;
if (mdma->is_mpc8308) { /* MPC8308, no request lines, software initiated start */
out_8(&mdma->regs->dmassrt, cid);
} elseif (first->will_access_peripheral) { /* Peripherals involved, start by external request signal */
out_8(&mdma->regs->dmaserq, cid);
} else { /* Memory to memory transfer, software initiated start */
out_8(&mdma->regs->dmassrt, cid);
}
}
/*
 * NOTE(review): the region below appears to fuse fragments of two
 * different routines.  The lines down to the channels[] lookup match
 * the start of a per-half interrupt demux helper (scanning the
 * "is | es" status bits with fls()), while the for-loop over
 * mdma->dma.chancnt that follows belongs to a separate
 * completed-descriptor/callback processing path.  Neither body is
 * complete here: 'i', 'flags', 'list' and 'desc' are used but never
 * declared in the visible text, the while-loop is never closed, and
 * "staticvoid" is a fused keyword.  Restore both functions from the
 * original driver source before building.
 */
/* Handle interrupt on one half of DMA controller (32 channels) */ staticvoid mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{ struct mpc_dma_chan *mchan; struct mpc_dma_desc *mdesc;
u32 status = is | es; int ch;
/* Scan set status bits from most-significant down; fls() is 1-based */
while ((ch = fls(status) - 1) >= 0) {
/* NOTE(review): '1 << ch' is a signed shift; ch == 31 shifts into the sign bit */
status &= ~(1 << ch);
mchan = &mdma->channels[ch + off];
/* NOTE(review): truncation point — the rest of the IRQ helper is missing */
for (i = 0; i < mdma->dma.chancnt; i++) {
mchan = &mdma->channels[i];
/* Get all completed descriptors */
spin_lock_irqsave(&mchan->lock, flags); if (!list_empty(&mchan->completed))
list_splice_tail_init(&mchan->completed, &list);
spin_unlock_irqrestore(&mchan->lock, flags);
if (list_empty(&list)) continue;
/* Execute callbacks and run dependencies */
list_for_each_entry(mdesc, &list, node) {
desc = &mdesc->desc;
/*
 * NOTE(review): fragment of a channel-resource allocation routine
 * (alloc_chan_resources-style).  'tcd', 'tcd_paddr', 'i', 'mdesc' and
 * MPC_DMA_DESCRIPTORS are used but not declared/defined in the visible
 * text, and the function header is missing — the enclosing function
 * must be restored from the original driver source.
 */
/* Alloc DMA memory for Transfer Control Descriptors */
tcd = dma_alloc_coherent(mdma->dma.dev,
MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
&tcd_paddr, GFP_KERNEL); if (!tcd) return -ENOMEM;
/* Alloc descriptors for this channel */ for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); if (!mdesc) {
dev_notice(mdma->dma.dev, "Memory allocation error. Allocated only %u descriptors\n", i); break;
}
/* Return error only if no descriptors were allocated */ if (i == 0) {
/* Undo the coherent TCD allocation above before bailing out */
dma_free_coherent(mdma->dma.dev,
MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
tcd, tcd_paddr); return -ENOMEM;
}
/* Channel must be idle — a non-empty list here is a driver bug */
BUG_ON(!list_empty(&mchan->prepared));
BUG_ON(!list_empty(&mchan->queued));
BUG_ON(!list_empty(&mchan->active));
BUG_ON(!list_empty(&mchan->completed));
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}
/*
 * NOTE(review): fragment of a slave scatter/gather preparation routine.
 * The "place descriptor in prepared list" sequence appears twice (here
 * and again after the TCD setup) — likely a fusion artifact of the
 * source mangling.  'tcd', 'sg', 'len', 'iter', 'tcd_nunits' and
 * 'iflags' are used but not declared in the visible text; restore the
 * enclosing function from the original driver source.
 */
/* Place descriptor in prepared list */
spin_lock_irqsave(&mchan->lock, iflags);
list_add_tail(&mdesc->node, &mchan->prepared);
spin_unlock_irqrestore(&mchan->lock, iflags);
if (mdma->is_mpc8308) {
/* MPC8308: length must be a multiple of the slave bus width */
tcd->nbytes = sg_dma_len(sg); if (!IS_ALIGNED(tcd->nbytes, mchan->swidth)) goto err_prep;
/* No major loops for MPC8308 */
tcd->biter = 1;
tcd->citer = 1;
} else {
len = sg_dma_len(sg);
/* MPC512x: total length must be a multiple of nbytes per minor loop */
tcd->nbytes = tcd_nunits * tcd->ssize; if (!IS_ALIGNED(len, tcd->nbytes)) goto err_prep;
/* Major-loop count is limited to 15 bits by the hardware */
iter = len / tcd->nbytes; if (iter >= 1 << 15) { /* len is too big */ goto err_prep;
} /* citer_linkch contains the high bits of iter */
tcd->biter = iter & 0x1ff;
tcd->biter_linkch = iter >> 9;
tcd->citer = tcd->biter;
tcd->citer_linkch = tcd->biter_linkch;
}
tcd->e_sg = 0;
tcd->d_req = 1;
/* Place descriptor in prepared list */
spin_lock_irqsave(&mchan->lock, iflags);
list_add_tail(&mdesc->node, &mchan->prepared);
spin_unlock_irqrestore(&mchan->lock, iflags);
}
return &mdesc->desc;
err_prep: /* On failure, put the descriptor back on the free list */
spin_lock_irqsave(&mchan->lock, iflags);
list_add_tail(&mdesc->node, &mchan->free);
spin_unlock_irqrestore(&mchan->lock, iflags);
return NULL;
}
/*
 * Check whether a slave bus width is usable on this controller.
 *
 * Widths of 1, 2, 4, 16, and 32 bytes are accepted; the 16-byte width
 * is rejected on MPC8308 (@is_mpc8308).  Any other width is invalid.
 *
 * Returns true when @buswidth is supported on the given SoC variant.
 */
inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		break;
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}
/*
 * NOTE(review): fragment of a slave-config validation routine; the
 * function header is missing and 'cfg'/'mdma' are not declared in the
 * visible text.
 *
 * Software constraints:
 *  - only transfers between a peripheral device and memory are
 *    supported
 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
 *    are supported, and, consequently, source addresses and
 *    destination addresses must be aligned accordingly; furthermore,
 *    for MPC512x SoCs, the transfer size must be aligned on (chunk
 *    size * maxburst)
 *  - during the transfer, the RAM address is incremented by the size
 *    of transfer chunk
 *  - the peripheral port's address is constant during the transfer.
 */
if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
!IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) { return -EINVAL;
}
if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
!is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308)) return -EINVAL;
/*
 * NOTE(review): tail fragment of the probe routine and its error unwind
 * labels; 'dev', 'retval' and 'mdma' are declared earlier in the
 * (missing) function body.  The labels release resources in reverse
 * order of acquisition (goto-based cleanup).
 */
/* Register with OF helpers for DMA lookups (nonfatal) */ if (dev->of_node) {
retval = of_dma_controller_register(dev->of_node,
of_dma_xlate_by_chan_id, mdma); if (retval)
dev_warn(dev, "Could not register for OF lookup\n");
}
return 0;
/* irq2 exists only on MPC8308, hence the conditional unwind steps */
err_free2: if (mdma->is_mpc8308)
free_irq(mdma->irq2, mdma);
err_free1:
free_irq(mdma->irq, mdma);
err_dispose2: if (mdma->is_mpc8308)
irq_dispose_mapping(mdma->irq2);
err_dispose1:
irq_dispose_mapping(mdma->irq);
err: return retval;
}
/*
 * NOTE(review): the following trailing text is web-page boilerplate
 * (a German disclaimer) accidentally appended to the source; it is not
 * part of the driver.  Retained here, translated, as a comment so the
 * file remains valid C: "The information on this website has been
 * carefully compiled to the best of our knowledge.  However, neither
 * completeness, correctness, nor quality of the provided information
 * is guaranteed.  Note: the colored syntax rendering and the
 * measurement are still experimental."
 */