/*
 * BRIEF MODULE DESCRIPTION
 *	The Descriptor Based DMA channel manager that first appeared
 *	on the Au1550.  I started with dma.c, but I think all that is
 *	left is this initial comment :-)
 *
 * Copyright 2004 Embedded Edge, LLC
 *	dan@embeddededge.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The Descriptor Based DMA supports up to 16 channels.
 *
 * There are 32 devices defined.  We keep an internal structure
 * of devices using these channels, along with additional
 * information.
 *
 * We allocate the descriptors and allow access to them through various
 * functions.  The drivers allocate the data buffers and assign them
 * to the descriptors.
 */

static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
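
/*
 * Rough call order a driver would follow with this API (illustrative only,
 * based on the functions defined in this file):
 *
 *	au1xxx_dbdma_chan_alloc()    - claim a source/destination device pair
 *	au1xxx_dbdma_set_devwidth()  - optional, when a FIFO endpoint is used
 *	au1xxx_dbdma_ring_alloc()    - allocate the descriptor ring
 *	au1xxx_dbdma_put_source() /
 *	au1xxx_dbdma_put_dest()      - queue data or empty buffers
 *	au1xxx_dbdma_start()         - let the engine run
 */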
/* I couldn't find a macro that did this... */
#define ALIGN_ADDR(x, a)	((((u32)(x)) + (a-1)) & ~(a-1))
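/*
 * Illustrative example (not used by the code below): ALIGN_ADDR() rounds an
 * address up to the next multiple of 'a', assuming 'a' is a power of two,
 * e.g. ALIGN_ADDR(0x1004, 32) == 0x1020.
 */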
/* Allocate a channel and return a non-zero descriptor if successful. */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
       void (*callback)(int, void *), void *callparam)
{
	unsigned long flags;
	u32 used, chan;
	u32 dcp;
	int i;
	dbdev_tab_t *stp, *dtp;
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;
	/*
	 * We do the initialization on the first channel allocation.
	 * We have to wait because of the interrupt handler initialization
	 * which can't be done successfully during board set up.
	 */
	if (!dbdma_initialized)
		return 0;

	stp = find_dbdev_id(srcid);
	if (stp == NULL)
		return 0;
	dtp = find_dbdev_id(destid);
	if (dtp == NULL)
		return 0;

	used = 0;
	/* Check to see if we can get both channels. */
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
	    (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
		/* Got source */
		stp->dev_flags |= DEV_FLAGS_INUSE;
		if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
		    (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
			/* Got destination */
			dtp->dev_flags |= DEV_FLAGS_INUSE;
		} else {
			/* Can't get dest.  Release src. */
			stp->dev_flags &= ~DEV_FLAGS_INUSE;
			used++;
		}
	} else
		used++;
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	if (used)
		return 0;
	/* Let's see if we can allocate a channel for it. */
	ctp = NULL;
	chan = 0;

	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	for (i = 0; i < NUM_DBDMA_CHANS; i++)
		if (chan_tab_ptr[i] == NULL) {
			/*
			 * If kmalloc fails, it is caught below same
			 * as a channel not available.
			 */
			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
			chan_tab_ptr[i] = ctp;
			break;
		}
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
	/* Initialize channel configuration. */
	i = 0;
	if (stp->dev_intlevel)
		i |= DDMA_CFG_SED;
	if (stp->dev_intpolarity)
		i |= DDMA_CFG_SP;
	if (dtp->dev_intlevel)
		i |= DDMA_CFG_DED;
	if (dtp->dev_intpolarity)
		i |= DDMA_CFG_DP;
	if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
	    (dtp->dev_flags & DEV_FLAGS_SYNC))
		i |= DDMA_CFG_SYNC;
	cp->ddma_cfg = i;
	wmb(); /* drain writebuffer */
	/*
	 * Return a non-zero value that can be used to find the channel
	 * information in subsequent operations.
	 */
	return (u32)(&chan_tab_ptr[chan]);
}
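
/*
 * Minimal usage sketch for au1xxx_dbdma_chan_alloc() (illustrative only;
 * 'my_dma_done_cb' and 'my_dev' are hypothetical driver code, and the
 * memory-to-memory device IDs are just one possible choice):
 *
 *	u32 chanid;
 *
 *	chanid = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS, DSCR_CMD0_ALWAYS,
 *					 my_dma_done_cb, my_dev);
 *	if (!chanid)
 *		return -EBUSY;	(one or both endpoints already in use)
 */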
/*
 * Set the device width if source or destination is a FIFO.
 * Should be 8, 16, or 32 bits.
 */
u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
{
	u32 rv;
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;
/* Allocate a descriptor ring, initializing as much as possible. */
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
	int i;
	u32 desc_base, srcid, destid;
	u32 cmd0, cmd1, src1, dest1;
	u32 src0, dest0;
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;
	au1x_ddma_desc_t *dp;
	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	/*
	 * The descriptors must be 32-byte aligned.  There is a
	 * possibility the allocation will give us such an address,
	 * and if we try that first we are likely to not waste larger
	 * slabs of memory.
	 */
	desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
				       GFP_KERNEL|GFP_DMA);
	if (desc_base == 0)
		return 0;
	if (desc_base & 0x1f) {
		/*
		 * Lost....do it again, allocate extra, and round
		 * the address base.
		 */
		kfree((const void *)desc_base);
		i = entries * sizeof(au1x_ddma_desc_t);
		i += (sizeof(au1x_ddma_desc_t) - 1);
		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
		if (desc_base == 0)
			return 0;

		desc_base = ALIGN_ADDR(desc_base, 32);
	}
	/* Is it mem to mem transfer? */
	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
		cmd0 |= DSCR_CMD0_MEM;
	switch (stp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
		break;
	}

	switch (dtp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
		break;
	}
	/*
	 * If the device is marked as an in/out FIFO, ensure it is
	 * set non-coherent.
	 */
	if (stp->dev_flags & DEV_FLAGS_IN)
		cmd0 |= DSCR_CMD0_SN;	/* Source in FIFO */
	if (dtp->dev_flags & DEV_FLAGS_OUT)
		cmd0 |= DSCR_CMD0_DN;	/* Destination out FIFO */
	/*
	 * Set up source1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (stp->dev_tsize) {
	case 1:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
		break;
	case 2:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
		break;
	case 4:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
		break;
	}
	/* If source input is FIFO, set static address. */
	if (stp->dev_flags & DEV_FLAGS_IN) {
		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
		else
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
	}

	if (stp->dev_physaddr)
		src0 = stp->dev_physaddr;
	/*
	 * Set up dest1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (dtp->dev_tsize) {
	case 1:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
		break;
	case 2:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
		break;
	case 4:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
		break;
	}
	/* If destination output is FIFO, set static address. */
	if (dtp->dev_flags & DEV_FLAGS_OUT) {
		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
		else
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
	}
	/* Make last descriptor point to the first. */
	dp--;
	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
/*
 * Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count.  Normally used
 * for memory to fifo transfers.
 */
u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *(chan_tab_t **)chanid;

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;
	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count. */
	dp->dscr_source0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;

	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
	/*
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed.  It has to do with the snoop
	 * logic on the cache eviction buffer.  dma_default_coherent is set
	 * to false on these parts.
	 */
	if (!dma_default_coherent)
		dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/*
 * Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count.  Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 */
u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;
	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/* Load up buffer address and byte count. */
	dp->dscr_dest0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
#if 0
	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
	       dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
	       dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed.  It has to do with the snoop
	 * logic on the cache eviction buffer.  dma_default_coherent is set
	 * to false on these parts.
	 */
	if (!dma_default_coherent)
		dma_cache_inv(KSEG0ADDR(buf), nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/*
 * Get a destination buffer into the DMA ring.
 * Normally used to get a full buffer from the ring during fifo
 * to memory transfers.  This does not set the valid bit, you will
 * have to put another destination buffer to keep the DMA going.
 */
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	u32 rv;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->get_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;
/*
 * Start using the current descriptor pointer.  If the DBDMA encounters
 * a non-valid descriptor, it will stop.  In this case, we can just
 * continue by adding a buffer to the list and starting again.
 */
void au1xxx_dbdma_start(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;
	/* Run through the descriptors and reset the valid indicator. */
	dp = ctp->chan_desc_base;
	do {
		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
		/*
		 * Reset our software status -- this is used to determine
		 * if a descriptor is in use by upper level software,
		 * since posting can reset the 'V' bit.
		 */
		dp->sw_status = 0;
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}
EXPORT_SYMBOL(au1xxx_dbdma_reset);
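
/*
 * Illustrative sequence for driving a channel once it has been allocated
 * (hypothetical driver code; 'chanid' comes from au1xxx_dbdma_chan_alloc(),
 * NUM_DESCRIPTORS is a driver-chosen ring size, and 'buf' is assumed to be
 * a DMA-able bus address the driver already owns):
 *
 *	if (!au1xxx_dbdma_ring_alloc(chanid, NUM_DESCRIPTORS))
 *		return -ENOMEM;
 *	au1xxx_dbdma_put_source(chanid, buf, len, DDMA_FLAGS_IE);
 *	au1xxx_dbdma_start(chanid);
 */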
/*
 * Put a descriptor into the DMA ring.
 * This updates the source/destination pointers and byte count.
 */
u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	u32 nbytes = 0;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We shouldn't have multiple callers for a particular channel,
	 * and an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;
	/* Load up buffer addresses and byte count. */
	dp->dscr_dest0 = dscr->dscr_dest0;
	dp->dscr_source0 = dscr->dscr_source0;
	dp->dscr_dest1 = dscr->dscr_dest1;
	dp->dscr_source1 = dscr->dscr_source1;
	dp->dscr_cmd1 = dscr->dscr_cmd1;
	nbytes = dscr->dscr_cmd1;

	/* Allow the caller to specify if an interrupt is generated */
	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));