/*
 * Linked List Descriptor: in-memory layout of one XDMAC hardware
 * descriptor (full view). The field order and widths are consumed by
 * the DMA controller itself — do not reorder or repack.
 */
struct at_xdmac_lld {
u32 mbr_nda; /* Next Descriptor Member: bus address of the next lld in the chain */
u32 mbr_ubc; /* Microblock Control Member: microblock length and NDVIEW/flags */
u32 mbr_sa; /* Source Address Member */
u32 mbr_da; /* Destination Address Member */
u32 mbr_cfg; /* Configuration Register: per-descriptor channel configuration */
u32 mbr_bc; /* Block Control Register: number of microblocks */
u32 mbr_ds; /* Data Stride Register */
u32 mbr_sus; /* Source Microblock Stride Register */
u32 mbr_dus; /* Destination Microblock Stride Register */
};
/*
 * Software descriptor wrapping one hardware lld.
 *
 * 64-bit alignment needed to update CNDA and CUBC registers in an atomic way
 * (the residue code relies on the controller fetching NDA/UBC as one unit).
 *
 * Fix vs. extracted source: de-mangled the fused token "unsignedint" and
 * restored one-member-per-line formatting; no semantic change.
 */
struct at_xdmac_desc {
	struct at_xdmac_lld		lld;	/* hardware view, must stay first/aligned */
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;	/* transfer already started on HW */
	unsigned int			xfer_size;	/* total size, used for residue */
	struct list_head		descs_list;	/* llds composing this transfer */
	struct list_head		xfer_node;	/* link in the channel xfers_list */
} __aligned(sizeof(u64));
/*
 * NOTE(review): fragment of at_xdmac_start_xfer() — the function header was
 * lost during extraction. Reformatted and de-mangled only ("elseif" ->
 * "else if", one statement per line); logic unchanged.
 */
/* Set transfer as active to not try to start it again. */
first->active_xfer = true;

/* Tell xdmac where to get the first descriptor. */
reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
if (atxdmac->layout->sdif)
	reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

/*
 * When doing non cyclic transfer we need to use the next
 * descriptor view 2 since some fields of the configuration register
 * depend on transfer size and src/dest addresses.
 */
if (at_xdmac_chan_is_cyclic(atchan))
	reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
else if ((first->lld.mbr_ubc &
	  AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
	reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
else
	reg = AT_XDMAC_CNDC_NDVIEW_NDV2;

/*
 * Even if the register will be updated from the configuration in the
 * descriptor when using view 2 or higher, the PROT bit won't be set
 * properly. This bit can be modified only by using the channel
 * configuration register.
 */
at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

/* Mask all channel interrupt sources; the needed ones are enabled below. */
at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);

reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
/*
 * Request Overflow Error is only for peripheral synchronized transfers
 */
if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
	reg |= AT_XDMAC_CIE_ROIE;

/*
 * There is no end of list when doing cyclic dma, we need to get
 * an interrupt after each periods.
 */
if (at_xdmac_chan_is_cyclic(atchan))
	at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
			    reg | AT_XDMAC_CIE_BIE);
else
	at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
			    reg | AT_XDMAC_CIE_LIE);
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
dev_vdbg(chan2dev(&atchan->chan),
	 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
/* Make sure descriptor memory writes are visible before enabling the HW. */
wmb();
at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
/*
 * Only check that maxburst and addr width values are supported by
 * the controller but not that the configuration is good to perform the
 * transfer since we don't know the direction at this stage.
 *
 * Returns 0 when both burst sizes and both address widths are within the
 * controller limits, -EINVAL otherwise.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	/*
	 * NOTE(review): the success return and closing brace were truncated
	 * in the extracted source; reconstructed here (the function must
	 * return, and both failure paths already return -EINVAL).
	 */
	return 0;
}
/*
 * NOTE(review): loop-body fragment of a prep_slave_sg-style routine; the
 * enclosing function and for_each_sg() header were lost during extraction.
 * Reformatted only (one statement per line); logic unchanged.
 */
len = sg_dma_len(sg);
mem = sg_dma_address(sg);
/* A zero-length sg entry cannot be programmed; abort the preparation. */
if (unlikely(!len)) {
	dev_err(chan2dev(chan), "sg data length is zero\n");
	goto spin_unlock;
}
dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
	__func__, i, len, mem);

desc = at_xdmac_get_desc(atchan);
if (!desc) {
	dev_err(chan2dev(chan), "can't get descriptor\n");
	/* Give back descriptors already taken for this transfer. */
	if (first)
		list_splice_tail_init(&first->descs_list,
				      &atchan->free_descs_list);
	goto spin_unlock;
}
/*
 * NOTE(review): tail fragment of at_xdmac_align_width() — the function
 * header was lost during extraction. De-mangled ("elseif" -> "else if")
 * and reformatted; logic unchanged.
 *
 * Check address alignment to select the greater data width we
 * can use.
 *
 * Some XDMAC implementations don't provide dword transfer, in
 * this case selecting dword has the same behavior as
 * selecting word transfers.
 */
if (!(addr & 7)) {
	width = AT_XDMAC_CC_DWIDTH_DWORD;
	dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
} else if (!(addr & 3)) {
	width = AT_XDMAC_CC_DWIDTH_WORD;
	dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
} else if (!(addr & 1)) {
	width = AT_XDMAC_CC_DWIDTH_HALFWORD;
	dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
} else {
	width = AT_XDMAC_CC_DWIDTH_BYTE;
	dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
}

return width;
}
staticstruct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan, struct at_xdmac_chan *atchan, struct at_xdmac_desc *prev,
dma_addr_t src, dma_addr_t dst, struct dma_interleaved_template *xt, struct data_chunk *chunk)
{ struct at_xdmac_desc *desc;
u32 dwidth; unsignedlong flags;
size_t ublen; /* * WARNING: The channel configuration is set here since there is no * dmaengine_slave_config call in this case. Moreover we don't know the * direction, it involves we can't dynamically set the source and dest * interface so we have to use the same one. Only interface 0 allows EBI * access. Hopefully we can access DDR through both ports (at least on * SAMA5D4x), so we can use the same interface for source and dest, * that solves the fact we don't know the direction. * ERRATA: Even if useless for memory transfers, the PERID has to not * match the one of another channel. If not, it could lead to spurious * flag status. * For SAMA7G5x case, the SIF and DIF fields are no longer used. * Thus, no need to have the SIF/DIF interfaces here. * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as * zero.
*/
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN;
dwidth = at_xdmac_align_width(chan, src | dst | chunk->size); if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
dev_dbg(chan2dev(chan), "%s: chunk too big (%zu, max size %lu)...\n",
__func__, chunk->size,
AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth); return NULL;
}
if (prev)
dev_dbg(chan2dev(chan), "Adding items at the end of desc 0x%p\n", prev);
if (xt->src_inc) { if (xt->src_sgl)
chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; else
chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
}
if (xt->dst_inc) { if (xt->dst_sgl)
chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; else
chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
}
spin_lock_irqsave(&atchan->lock, flags);
desc = at_xdmac_get_desc(atchan);
spin_unlock_irqrestore(&atchan->lock, flags); if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n"); return NULL;
}
staticstruct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsignedlong flags)
{ struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac_desc *first = NULL, *prev = NULL;
size_t remaining_size = len, xfer_size = 0, ublen;
dma_addr_t src_addr = src, dst_addr = dest;
u32 dwidth; /* * WARNING: We don't know the direction, it involves we can't * dynamically set the source and dest interface so we have to use the * same one. Only interface 0 allows EBI access. Hopefully we can * access DDR through both ports (at least on SAMA5D4x), so we can use * the same interface for source and dest, that solves the fact we * don't know the direction. * ERRATA: Even if useless for memory transfers, the PERID has to not * match the one of another channel. If not, it could lead to spurious * flag status. * For SAMA7G5x case, the SIF and DIF fields are no longer used. * Thus, no need to have the SIF/DIF interfaces here. * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as * zero.
*/
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_DAM_INCREMENTED_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN; unsignedlong irqflags;
staticstruct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, struct at_xdmac_chan *atchan,
dma_addr_t dst_addr,
size_t len, int value)
{ struct at_xdmac_desc *desc; unsignedlong flags;
size_t ublen;
u32 dwidth; char pattern; /* * WARNING: The channel configuration is set here since there is no * dmaengine_slave_config call in this case. Moreover we don't know the * direction, it involves we can't dynamically set the source and dest * interface so we have to use the same one. Only interface 0 allows EBI * access. Hopefully we can access DDR through both ports (at least on * SAMA5D4x), so we can use the same interface for source and dest, * that solves the fact we don't know the direction. * ERRATA: Even if useless for memory transfers, the PERID has to not * match the one of another channel. If not, it could lead to spurious * flag status. * For SAMA7G5x case, the SIF and DIF fields are no longer used. * Thus, no need to have the SIF/DIF interfaces here. * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as * zero.
*/
u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_DAM_UBS_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_MEMSET_HW_MODE
| AT_XDMAC_CC_TYPE_MEM_TRAN;
dwidth = at_xdmac_align_width(chan, dst_addr);
if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
dev_err(chan2dev(chan), "%s: Transfer too large, aborting...\n",
__func__); return NULL;
}
spin_lock_irqsave(&atchan->lock, flags);
desc = at_xdmac_get_desc(atchan);
spin_unlock_irqrestore(&atchan->lock, flags); if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n"); return NULL;
}
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
/* Only the first byte of value is to be used according to dmaengine */
pattern = (char)value;
/*
 * NOTE(review): loop-body fragment of a memset_sg-style prep routine; the
 * enclosing function and loop header were lost during extraction.
 * De-mangled ("elseif" -> "else if") and reformatted; logic unchanged.
 *
 * The scatterlist API gives us only the address and
 * length of each elements.
 *
 * Unfortunately, we don't have the stride, which we
 * will need to compute.
 *
 * That make us end up in a situation like this one:
 *    len    stride    len    stride    len
 * +-------+        +-------+        +-------+
 * |  N-2  |        |  N-1  |        |   N   |
 * +-------+        +-------+        +-------+
 *
 * We need all these three elements (N-2, N-1 and N)
 * to actually take the decision on whether we need to
 * queue N-1 or reuse N-2.
 *
 * We will only consider N if it is the last element.
 */
if (ppdesc && pdesc) {
	if ((stride == pstride) &&
	    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
		dev_dbg(chan2dev(chan),
			"%s: desc 0x%p can be merged with desc 0x%p\n",
			__func__, pdesc, ppdesc);

		/*
		 * Increment the block count of the
		 * N-2 descriptor
		 */
		at_xdmac_increment_block_count(chan, ppdesc);
		ppdesc->lld.mbr_dus = stride;

		/*
		 * Put back the N-1 descriptor in the
		 * free descriptor list
		 */
		list_add_tail(&pdesc->desc_node,
			      &atchan->free_descs_list);

		/*
		 * Make our N-1 descriptor pointer
		 * point to the N-2 since they were
		 * actually merged.
		 */
		pdesc = ppdesc;

	/*
	 * Rule out the case where we don't have
	 * pstride computed yet (our second sg
	 * element)
	 *
	 * We also want to catch the case where there
	 * would be a negative stride,
	 */
	} else if (pstride ||
		   sg_dma_address(sg) < sg_dma_address(psg)) {
		/*
		 * Queue the N-1 descriptor after the
		 * N-2
		 */
		at_xdmac_queue_desc(chan, ppdesc, pdesc);

		/*
		 * Add the N-1 descriptor to the list
		 * of the descriptors used for this
		 * transfer
		 */
		list_add_tail(&desc->desc_node,
			      &first->descs_list);
		dev_dbg(chan2dev(chan),
			"%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
	}
}

/*
 * If we are the last element, just see if we have the
 * same size than the previous element.
 *
 * If so, we can merge it with the previous descriptor
 * since we don't care about the stride anymore.
 */
if ((i == (sg_len - 1)) &&
    sg_dma_len(psg) == sg_dma_len(sg)) {
	dev_dbg(chan2dev(chan),
		"%s: desc 0x%p can be merged with desc 0x%p\n",
		__func__, desc, pdesc);

	/*
	 * Increment the block count of the N-1
	 * descriptor
	 */
	at_xdmac_increment_block_count(chan, pdesc);
	pdesc->lld.mbr_dus = stride;

	/*
	 * Put back the N descriptor in the free
	 * descriptor list
	 */
	list_add_tail(&desc->desc_node,
		      &atchan->free_descs_list);
}
/*
 * NOTE(review): residue-computation fragment of a tx_status-style routine;
 * the function header and locals were lost during extraction. Reformatted
 * only; logic unchanged.
 *
 * If the transfer has not been started yet, don't need to compute the
 * residue, it's the transfer length.
 */
if (!desc->active_xfer) {
	dma_set_residue(txstate, desc->xfer_size);
	goto spin_unlock;
}

residue = desc->xfer_size;
/*
 * Flush FIFO: only relevant when the transfer is source peripheral
 * synchronized. Flush is needed before reading CUBC because data in
 * the FIFO are not reported by CUBC. Reporting a residue of the
 * transfer length while we have data in FIFO can cause issue.
 * Usecase: atmel USART has a timeout which means I have received
 * characters but there is no more character received for a while. On
 * timeout, it requests the residue. If the data are in the DMA FIFO,
 * we will return a residue of the transfer length. It means no data
 * received. If an application is waiting for these data, it will hang
 * since we won't have another USART timeout without receiving new
 * data.
 */
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
if ((desc->lld.mbr_cfg & mask) == value) {
	at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
	while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
		cpu_relax();
}

/*
 * The easiest way to compute the residue should be to pause the DMA
 * but doing this can lead to miss some data as some devices don't
 * have FIFO.
 * We need to read several registers because:
 *  - DMA is running therefore a descriptor change is possible while
 *    reading these registers
 *  - When the block transfer is done, the value of the CUBC register
 *    is set to its initial value until the fetch of the next descriptor.
 *    This value will corrupt the residue calculation so we have to skip
 *    it.
 *
 * INITD --------                    ------------
 *              |____________________|
 *       _______________________  _______________
 * NDA       @desc2             \/   @desc3
 *       _______________________/\_______________
 *       __________  ___________  _______________
 * CUBC       0    \/ MAX desc1 \/  MAX desc2
 *       __________/\___________/\_______________
 *
 * Since descriptors are aligned on 64 bits, we can assume that
 * the update of NDA and CUBC is atomic.
 * Memory barriers are used to ensure the read order of the registers.
 * A max number of retries is set because unlikely it could never ends.
 */
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
	check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
	rmb();
	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
	rmb();
	initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
	rmb();
	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
	rmb();

	/* Stable sample: NDA unchanged across the reads and fetch finished. */
	if ((check_nda == cur_nda) && initd)
		break;
}

if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
	ret = DMA_ERROR;
	goto spin_unlock;
}

/*
 * Flush FIFO: only relevant when the transfer is source peripheral
 * synchronized. Another flush is needed here because CUBC is updated
 * when the controller sends the data write command. It can lead to
 * report data that are not written in the memory or the device. The
 * FIFO flush ensures that data are really written.
 */
if ((desc->lld.mbr_cfg & mask) == value) {
	at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
	while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
		cpu_relax();
}

/*
 * Remove size of all microblocks already transferred and the current
 * one. Then add the remaining size to transfer of the current
 * microblock.
 */
descs_list = &desc->descs_list;
list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
	dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
	residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
	if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
		desc = iter;
		break;
	}
}
residue += cur_ubc << dwidth;
/* Called with atchan->lock held. */ staticvoid at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); struct at_xdmac_desc *bad_desc; int ret;
ret = pm_runtime_resume_and_get(atxdmac->dev); if (ret < 0) return;
/* * The descriptor currently at the head of the active list is * broken. Since we don't have any way to report errors, we'll * just have to scream loudly and try to continue with other * descriptors queued (if any).
*/ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
dev_err(chan2dev(&atchan->chan), "read bus error!!!"); if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
dev_err(chan2dev(&atchan->chan), "write bus error!!!"); if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
/* Channel must be disabled first as it's not done automatically */
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
cpu_relax();
/*
 * NOTE(review): transfer-completion fragment (tasklet-style routine); the
 * function header was lost during extraction. Reformatted only; logic
 * unchanged. The lock is dropped around the client callback to avoid
 * calling back into dmaengine with atchan->lock held.
 */
txd = &desc->tx_dma_desc;
dma_cookie_complete(txd);
/* Remove the transfer from the transfer list. */
list_del(&desc->xfer_node);
spin_unlock_irq(&atchan->lock);

if (txd->flags & DMA_PREP_INTERRUPT)
	dmaengine_desc_get_callback_invoke(txd, NULL);

dma_run_dependencies(txd);

spin_lock_irq(&atchan->lock);
/* Move the xfer descriptors into the free descriptors list. */
list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
at_xdmac_advance_work(atchan);
spin_unlock_irq(&atchan->lock);
/*
 * NOTE(review): fragment of a device_pause-style routine; the function
 * header was lost during extraction. Reformatted only; logic unchanged.
 */
/* Already paused: nothing to do (test_and_set_bit makes this idempotent). */
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
	return 0;

ret = pm_runtime_resume_and_get(atxdmac->dev);
if (ret < 0)
	return ret;

spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_device_pause_set(atxdmac, atchan);
/* Decrement runtime PM ref counter for each active descriptor. */
at_xdmac_runtime_suspend_descriptors(atchan);
/*
 * NOTE(review): fragment of a terminate_all-style routine; the function
 * header was lost during extraction. Reformatted only; logic unchanged.
 */
/* Cancel all pending transfers. */
list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
	list_del(&desc->xfer_node);
	list_splice_tail_init(&desc->descs_list,
			      &atchan->free_descs_list);
	/*
	 * We incremented the runtime PM reference count on
	 * at_xdmac_start_xfer() for this descriptor. Now it's time
	 * to release it.
	 */
	if (desc->active_xfer)
		pm_runtime_put_noidle(atxdmac->dev);
}
/*
 * NOTE(review): fragment of an alloc_chan_resources-style routine; the
 * function header was lost during extraction. Reformatted only; logic
 * unchanged. Pre-allocates init_nr_desc_per_channel descriptors,
 * tolerating partial allocation as long as at least one succeeds.
 */
if (!list_empty(&atchan->free_descs_list)) {
	dev_err(chan2dev(chan), "can't allocate channel resources (channel not free from a previous use)\n");
	return -EIO;
}

for (i = 0; i < init_nr_desc_per_channel; i++) {
	desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
	if (!desc) {
		if (i == 0) {
			dev_warn(chan2dev(chan), "can't allocate any descriptors\n");
			return -EIO;
		}
		dev_warn(chan2dev(chan), "only %d descriptors have been allocated\n", i);
		break;
	}
	list_add_tail(&desc->desc_node, &atchan->free_descs_list);
}

dma_cookie_init(chan);
/*
 * NOTE(review): the original file was cut off here by the extraction tool
 * ("maximum size reached"); the trailing lines were tool-generated boiler-
 * plate, not source code. The remainder of the driver is missing from this
 * copy — recover it from the upstream file before building.
 */