// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */
/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can be in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host.
 */
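/*
 * For illustration only: a sketch of the 32-byte hardware descriptor the
 * comment above refers to. The field layout follows the descriptor format
 * described in the XDMA product guide (PG195); the structure name and field
 * comments here are illustrative, not the driver's authoritative definition.
 */
struct xdma_hw_desc_sketch {
        __le32 control;         /* magic, next-adjacent count and control flags */
        __le32 bytes;           /* number of bytes to transfer */
        __le64 src_addr;        /* source address (host or card) */
        __le64 dst_addr;        /* destination address (host or card) */
        __le64 next_desc;       /* bus address of the next descriptor */
};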
/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel
 * @irq: IRQ assigned to the channel
 * @last_interrupt: Completion signaled by the last interrupt of a transfer
 * @stop_requested: Whether a stop of the channel has been requested
 */
struct xdma_chan {
        struct virt_dma_chan vchan;
        void *xdev_hdl;
        u32 base;
        struct dma_pool *desc_pool;
        bool busy;
        enum dma_transfer_direction dir;
        struct dma_slave_config cfg;
        u32 irq;
        struct completion last_interrupt;
        bool stop_requested;
};
/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 * @cyclic: Cyclic transfer vs. scatter-gather
 * @interleaved_dma: Interleaved DMA transfer
 * @periods: Number of periods in the cyclic transfer
 * @period_size: Size of a period in bytes in cyclic transfers
 * @frames_left: Number of frames left in interleaved DMA transfer
 * @error: tx error flag
 */
struct xdma_desc {
        struct virt_dma_desc vdesc;
        struct xdma_chan *chan;
        enum dma_transfer_direction dir;
        struct xdma_desc_block *desc_blocks;
        u32 dblk_num;
        u32 desc_num;
        u32 completed_desc_num;
        bool cyclic;
        bool interleaved_dma;
        u32 periods;
        u32 period_size;
        u32 frames_left;
        bool error;
};
/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
        return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}
/**
 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
        struct xdma_desc_block *block;
        u32 last_blk_desc, desc_control;
        struct xdma_hw_desc *desc;
        int i;

        desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
        for (i = 1; i < sw_desc->dblk_num; i++) {
                block = &sw_desc->desc_blocks[i - 1];
                desc = xdma_blk_last_desc(block);

                if (!(i & XDMA_DESC_BLOCK_MASK)) {
                        desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
                        continue;
                }
                desc->control = cpu_to_le32(desc_control);
                desc->next_desc = cpu_to_le64(block[1].dma_addr);
        }

        /* update the last block */
        last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
        if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
                block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
                desc = xdma_blk_last_desc(block);
                desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
                desc->control = cpu_to_le32(desc_control);
        }

        block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
        desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
        desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}
/**
 * xdma_xfer_start - kick off DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
        struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
        struct xdma_device *xdev = xchan->xdev_hdl;
        struct xdma_desc_block *block;
        u32 val, completed_blocks;
        struct xdma_desc *desc;
        int ret;

        /*
         * Return if there is no submitted descriptor or the channel is busy.
         * The vchan lock should be held where this function is called.
         */
        if (!vd || xchan->busy)
                return -EINVAL;
        /* clear run stop bit to get ready for transfer */
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
                           CHAN_CTRL_RUN_STOP);
        if (ret)
                return ret;
        desc = to_xdma_desc(vd);

        /* set DMA engine to the first descriptor block */
        completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
        block = &desc->desc_blocks[completed_blocks];
        val = lower_32_bits(block->dma_addr);
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
        if (ret)
                return ret;
        val = upper_32_bits(block->dma_addr);
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
        if (ret)
                return ret;
        if (completed_blocks + 1 == desc->dblk_num)
                val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
        else
                val = XDMA_DESC_ADJACENT - 1;
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
        if (ret)
                return ret;
        /* kick off DMA transfer */
        ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
                           CHAN_CTRL_START);
        if (ret)
                return ret;

        xchan->busy = true;
        xchan->stop_requested = false;
        reinit_completion(&xchan->last_interrupt);

        return 0;
}
/**
 * xdma_xfer_stop - Stop DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
        struct xdma_device *xdev = xchan->xdev_hdl;

        /* clear run stop bit to prevent any further auto-triggering */
        return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
                            CHAN_CTRL_RUN_STOP);
}
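/*
 * For illustration, a sketch of how xdma_xfer_start() is called with the
 * vchan lock held, as required by the comment above. This is modeled on a
 * typical dmaengine issue_pending callback; the function name here is
 * hypothetical.
 */
static void example_issue_pending(struct dma_chan *chan)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
        if (vchan_issue_pending(&xdma_chan->vchan))
                xdma_xfer_start(xdma_chan);
        spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}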
        /* detect number of available DMA channels */
        for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
                ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
                                  &identifier);
                if (ret)
                        return ret;

                /* check if it is an available DMA channel */
                if (XDMA_CHAN_CHECK_TARGET(identifier, target))
                        (*chan_num)++;
        }
        if (!*chan_num) {
                xdma_err(xdev, "does not probe any channel");
                return -EINVAL;
        }
        *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
                              GFP_KERNEL);
        if (!*chans)
                return -ENOMEM;
        for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
                ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
                                  &identifier);
                if (ret)
                        return ret;

                if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
                        continue;
        }
/**
 * xdma_synchronize - Synchronize a terminated channel
 * @chan: DMA channel pointer
 */
static void xdma_synchronize(struct dma_chan *chan)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct xdma_device *xdev = xdma_chan->xdev_hdl;
        u32 st = 0;

        /* If the engine continues running, wait for the last interrupt */
        regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
        if (st & XDMA_CHAN_STATUS_BUSY)
                wait_for_completion_timeout(&xdma_chan->last_interrupt,
                                            msecs_to_jiffies(1000));

        vchan_synchronize(&xdma_chan->vchan);
}
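/*
 * For reference, a sketch of the channel-detection test used above. The
 * exact macro lives in xdma-regs.h; this version assumes the identifier
 * register layout from the XDMA product guide (PG195): a fixed subsystem ID
 * of 0x1fc in bits [31:20] and the target (0 for H2C, 1 for C2H) in bits
 * [19:16].
 */
#define XDMA_CHAN_CHECK_TARGET_SKETCH(id, target)       \
        (((u32)(id) >> 16) == (0x1fc0 | (target)))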
/**
 * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
 *                     More than one descriptor will be used if the size is bigger
 *                     than XDMA_DESC_BLEN_MAX.
 * @sw_desc: Descriptor container
 * @src_addr: First value for the ->src_addr field
 * @dst_addr: First value for the ->dst_addr field
 * @size: Size of the contiguous memory block
 * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
 */
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
                                  u64 dst_addr, u32 size, u32 filled_descs_num)
{
        u32 left = size, len, desc_num = filled_descs_num;
        struct xdma_desc_block *dblk;
        struct xdma_hw_desc *desc;

        dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
        desc = dblk->virt_addr;
        desc += desc_num & XDMA_DESC_ADJACENT_MASK;
        do {
                len = min_t(u32, left, XDMA_DESC_BLEN_MAX);

                /* set hardware descriptor */
                desc->bytes = cpu_to_le32(len);
                desc->src_addr = cpu_to_le64(src_addr);
                desc->dst_addr = cpu_to_le64(dst_addr);

                if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
                        desc = (++dblk)->virt_addr;
                else
                        desc++;

                src_addr += len;
                dst_addr += len;
                left -= len;
        } while (left);

        return desc_num - filled_descs_num;
}
        /* tail of a prep callback: wrap the software descriptor for submission */
        tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
        if (!tx_desc)
                goto failed;

        return tx_desc;

failed:
        xdma_free_desc(&sw_desc->vdesc);

        return NULL;
}
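/*
 * A minimal sketch (not part of the driver) of the splitting rule stated in
 * the xdma_fill_descs() kernel-doc: one contiguous chunk of @size bytes
 * consumes DIV_ROUND_UP(size, XDMA_DESC_BLEN_MAX) hardware descriptors.
 */
static inline u32 xdma_example_desc_count(u32 size)
{
        return DIV_ROUND_UP(size, XDMA_DESC_BLEN_MAX);
}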
/**
 * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
 * @chan: DMA channel pointer
 * @address: Device DMA address to access
 * @size: Total length to transfer
 * @period_size: Period size to use for each transfer
 * @dir: Transfer direction
 * @flags: Transfer ack flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
                     size_t size, size_t period_size,
                     enum dma_transfer_direction dir, unsigned long flags)
{
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
        struct xdma_device *xdev = xdma_chan->xdev_hdl;
        unsigned int periods = size / period_size;
        struct dma_async_tx_descriptor *tx_desc;
        struct xdma_desc *sw_desc;
        u64 addr, dev_addr, *src, *dst;
        u32 desc_num;
        unsigned int i;
        /*
         * Simplify the whole logic by preventing an abnormally high number of
         * periods and an abnormally large period size.
         */
        if (period_size > XDMA_DESC_BLEN_MAX) {
                xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
                return NULL;
        }
        if (periods > XDMA_DESC_ADJACENT) {
                xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
                return NULL;
        }
        sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
        if (!sw_desc)
                return NULL;
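/*
 * A sketch of hypothetical client code (not part of this driver) requesting
 * a cyclic transfer through the standard dmaengine API, honoring the two
 * limits checked above: period_size at most XDMA_DESC_BLEN_MAX, and at most
 * XDMA_DESC_ADJACENT periods per transaction.
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                                size_t buf_len, size_t period_len)
{
        struct dma_async_tx_descriptor *tx;

        tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        return 0;
}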
/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
        struct xdma_chan *xchan = dev_id;
        u32 complete_desc_num = 0;
        struct xdma_device *xdev = xchan->xdev_hdl;
        struct virt_dma_desc *vd, *next_vd;
        struct xdma_desc *desc;
        bool repeat_tx;
        int ret;
        u32 st;

        spin_lock(&xchan->vchan.lock);

        if (xchan->stop_requested)
                complete(&xchan->last_interrupt);
        /* get submitted request */
        vd = vchan_next_desc(&xchan->vchan);
        if (!vd)
                goto out;

        /* Clear-on-read the status register */
        ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
        if (ret)
                goto out;
        desc = to_xdma_desc(vd);

        st &= XDMA_CHAN_STATUS_MASK;
        if ((st & XDMA_CHAN_ERROR_MASK) ||
            !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
                desc->error = true;
                xdma_err(xdev, "channel error, status register value: 0x%x", st);
                goto out;
        }
        ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
                          &complete_desc_num);
        if (ret)
                goto out;
        if (desc->interleaved_dma) {
                xchan->busy = false;
                desc->completed_desc_num += complete_desc_num;
                if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
                        xdma_xfer_start(xchan);
                        goto out;
                }
                /* last desc of any frame */
                desc->frames_left--;
                if (desc->frames_left)
                        goto out;
                /* last desc of the last frame */
                repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
                next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
                if (next_vd)
                        repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
                if (repeat_tx) {
                        desc->frames_left = desc->periods;
                        desc->completed_desc_num = 0;
                        vchan_cyclic_callback(vd);
                } else {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                }
                /* start (or continue) the transfer of the first descriptor on the vc.desc_issued list, if any */
                xdma_xfer_start(xchan);
        } else if (!desc->cyclic) {
                xchan->busy = false;
                desc->completed_desc_num += complete_desc_num;
                /* if all data blocks are transferred, remove and complete the request */
                if (desc->completed_desc_num == desc->desc_num) {
                        list_del(&vd->node);
                        vchan_cookie_complete(vd);
                        goto out;
                }
/**
 * xdma_irq_teardown - Teardown channel interrupts
 * @xdev: DMA device pointer
 */
static void xdma_irq_teardown(struct xdma_device *xdev)
{
        int i;

        /* free irq handler */
        for (i = 0; i < xdev->h2c_chan_num; i++)
                free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
        for (i = 0; i < xdev->c2h_chan_num; i++)
                free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}
/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQ
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
                               u32 irq_start, u32 irq_num)
{
        u32 shift, i, val = 0;
        int ret;

        /* Each IRQ register is 32 bit and contains 4 IRQs */
        while (irq_num > 0) {
                for (i = 0; i < 4; i++) {
                        shift = XDMA_IRQ_VEC_SHIFT * i;
                        val |= irq_start << shift;
                        irq_start++;
                        irq_num--;
                        if (!irq_num)
                                break;
                }

                /* write IRQ register */
                ret = regmap_write(xdev->rmap, vec_tbl_start, val);
                if (ret)
                        return ret;

                vec_tbl_start += sizeof(u32);
                val = 0;
        }

        return 0;
}
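/*
 * Worked example (illustrative): with XDMA_IRQ_VEC_SHIFT = 8, irq_start = 2
 * and irq_num = 6, the first pass packs vectors 2, 3, 4 and 5 into one
 * register as 0x05040302, and the second pass writes 0x00000706 for the
 * remaining vectors 6 and 7.
 */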
        /* config hardware IRQ registers */
        ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
                                  XDMA_CHAN_NUM(xdev));
        if (ret) {
                xdma_err(xdev, "failed to set channel vectors: %d", ret);
                goto failed_init_c2h;
        }
        /* config user IRQ registers if needed */
        user_irq_start = XDMA_CHAN_NUM(xdev);
        if (xdev->irq_num > user_irq_start) {
                ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
                                          user_irq_start,
                                          xdev->irq_num - user_irq_start);
                if (ret) {
                        xdma_err(xdev, "failed to set user vectors: %d", ret);
                        goto failed_init_c2h;
                }
        }
        /* enable interrupt */
        ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
        if (ret)
                goto failed_init_c2h;

        return 0;
failed_init_c2h:
        while (j--)
                free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
        while (i--)
                free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

        return ret;
}
/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
        struct xdma_device *xdev = platform_get_drvdata(pdev);
        u32 index;

        index = irq_num - xdev->irq_start;
        if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
                xdma_err(xdev, "invalid user irq number");
                return;
        }
        index -= XDMA_CHAN_NUM(xdev);

        regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);
/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
        struct xdma_device *xdev = platform_get_drvdata(pdev);
        u32 index;
        int ret;

        index = irq_num - xdev->irq_start;
        if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
                xdma_err(xdev, "invalid user irq number");
                return -EINVAL;
        }
        index -= XDMA_CHAN_NUM(xdev);

        ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);
/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
        struct xdma_device *xdev = platform_get_drvdata(pdev);

        if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
                xdma_err(xdev, "invalid user irq index");
                return -EINVAL;
        }

        return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);
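/*
 * A sketch of hypothetical consumer code (not part of this driver) tying the
 * exported user-IRQ helpers together: resolve the system IRQ for a wire,
 * install a handler, then unmask the wire in the XDMA core. The function
 * name and the "xdma-user" label are illustrative.
 */
static int example_setup_user_irq(struct platform_device *pdev, u32 wire,
                                  irq_handler_t handler, void *data)
{
        int irq, ret;

        irq = xdma_get_user_irq(pdev, wire);
        if (irq < 0)
                return irq;

        ret = request_irq(irq, handler, 0, "xdma-user", data);
        if (ret)
                return ret;

        return xdma_enable_user_irq(pdev, irq);
}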