/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there's 29 and 21
 * respectively. Given that the Normal DMA endpoints (other than
 * SDRAM) can be used as tx/rx, we need 78 vchans in total
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS \
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
/* 29 NDMA endpoints usable as both tx and rx, minus the one-way SDRAM one */
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS \
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
/*
 * This set of SUN4I_DDMA timing parameters were found experimentally while
 * working with the SPI driver and seem to make it behave correctly
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |	\
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |	\
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |	\
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
/*
 * Normal DMA supports individual transfers (segments) up to 128k.
 * Dedicated DMA supports transfers up to 16M. We can only report
 * one size limit, so we have to use the smaller value.
 */
#define SUN4I_NDMA_MAX_SEG_SIZE		SZ_128K
#define SUN4I_DDMA_MAX_SEG_SIZE		SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE		SUN4I_NDMA_MAX_SEG_SIZE
/*
 * NOTE(review): this declaration appears truncated in this chunk — the
 * closing brace and (per its own comment) additional per-SoC fields are
 * not visible here. Confirm against the full file before editing.
 */
/* * Hardware channels / ports representation * * The hardware is used in several SoCs, with differing numbers * of channels and endpoints. This structure ties those numbers * to a certain compatible string.
 */ struct sun4i_dma_config {
/* number of Normal DMA channels on this SoC */
u32 ndma_nr_max_channels;
/* number of Normal DMA virtual channels (endpoint combinations) */
u32 ndma_nr_max_vchans;
/* Physical (hardware) DMA channel state */
struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};
/*
 * NOTE(review): fragment — the enclosing pchan-search helper (its
 * signature, the scan loop using i/max, and the return) lies outside
 * this chunk.
 */
/* * pchans 0-priv->cfg->ndma_nr_max_channels are normal, and * priv->cfg->ndma_nr_max_channels+ are dedicated ones
 */ if (vchan->is_dedicated) {
/* dedicated vchans may only use pchans from the dedicated range */
i = priv->cfg->ndma_nr_max_channels;
max = priv->cfg->dma_nr_max_channels;
} else {
/* normal vchans may only use pchans from the normal range */
i = 0;
max = priv->cfg->ndma_nr_max_channels;
}
/*
 * NOTE(review): fragment — the success return and the "release_pchan"
 * error label this code jumps to lie outside this chunk. "staticint"
 * looks like "static int" with the whitespace lost during extraction;
 * confirm against the original file.
 */
/* * Execute pending operations on a vchan * * When given a vchan, this function will try to acquire a suitable * pchan and, if successful, will configure it to fulfill a promise * from the next pending contract. * * This function must be called with &vchan->vc.lock held.
 */ staticint __execute_vchan_pending(struct sun4i_dma_dev *priv, struct sun4i_dma_vchan *vchan)
{ struct sun4i_dma_promise *promise = NULL; struct sun4i_dma_contract *contract = NULL; struct sun4i_dma_pchan *pchan; struct virt_dma_desc *vd; int ret;
lockdep_assert_held(&vchan->vc.lock);
/* We need a pchan to do anything, so secure one if available */
pchan = find_and_use_pchan(priv, vchan); if (!pchan) return -EBUSY;
/* * Channel endpoints must not be repeated, so if this vchan * has already submitted some work, we can't do anything else
 */ if (vchan->processing) {
dev_dbg(chan2dev(&vchan->vc.chan), "processing something to this endpoint already\n");
ret = -EBUSY; goto release_pchan;
}
/* Walk the pending descriptors until one with outstanding demands is found */
do { /* Figure out which contract we're working with today */
vd = vchan_next_desc(&vchan->vc); if (!vd) {
dev_dbg(chan2dev(&vchan->vc.chan), "No pending contract found");
/* nothing to do is not an error; just give the pchan back */
ret = 0; goto release_pchan;
}
contract = to_sun4i_dma_contract(vd); if (list_empty(&contract->demands)) { /* The contract has been completed so mark it as such */
list_del(&contract->vd.node);
vchan_cookie_complete(&contract->vd);
dev_dbg(chan2dev(&vchan->vc.chan), "Empty contract found and marked complete");
}
} while (list_empty(&contract->demands));
/* Now find out what we need to do */
promise = list_first_entry(&contract->demands, struct sun4i_dma_promise, list);
vchan->processing = promise;
/* ... and make it reality */ if (promise) {
vchan->contract = contract;
vchan->pchan = pchan;
/* presumably (half-done, end) IRQ enables — confirm parameter meaning */
set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
configure_pchan(pchan, promise);
}
/*
 * NOTE(review): tail of a dma_slave_config sanitizer — the function
 * header and the start of the switch (presumably the DMA_MEM_TO_DEV
 * case) are outside this chunk.
 */
/* fill in missing source parameters from the destination side */
if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
sconfig->src_addr_width = sconfig->dst_addr_width;
if (!sconfig->src_maxburst)
sconfig->src_maxburst = sconfig->dst_maxburst;
break;
/* device-to-memory: the source parameters are mandatory */
case DMA_DEV_TO_MEM: if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
!sconfig->src_maxburst) return -EINVAL;
/* fill in missing destination parameters from the source side */
if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
sconfig->dst_addr_width = sconfig->src_addr_width;
if (!sconfig->dst_maxburst)
sconfig->dst_maxburst = sconfig->src_maxburst;
/* other directions need no sanitizing */
break; default: return 0;
}
return 0;
}
/* * Generate a promise, to be used in a normal DMA contract. * * A NDMA promise contains all the information required to program the * normal part of the DMA Engine and get data copied. A non-executed * promise will live in the demands list on a contract. Once it has been * completed, it will be moved to the completed demands list for later freeing. * All linked promises will be freed when the corresponding contract is freed
*/ staticstruct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig, enum dma_transfer_direction direction)
{ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device); struct sun4i_dma_promise *promise; int ret;
ret = sanitize_config(sconfig, direction); if (ret) return NULL;
promise = kzalloc(sizeof(*promise), GFP_NOWAIT); if (!promise) return NULL;
/* Source burst */
ret = priv->cfg->convert_burst(sconfig->src_maxburst); if (ret < 0) goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
ret = priv->cfg->convert_burst(sconfig->dst_maxburst); if (ret < 0) goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
/* Source bus width */
ret = convert_buswidth(sconfig->src_addr_width); if (ret < 0) goto fail;
priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width); if (ret < 0) goto fail;
priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
fail:
kfree(promise); return NULL;
}
/* * Generate a promise, to be used in a dedicated DMA contract. * * A DDMA promise contains all the information required to program the * Dedicated part of the DMA Engine and get data copied. A non-executed * promise will live in the demands list on a contract. Once it has been * completed, it will be moved to the completed demands list for later freeing. * All linked promises will be freed when the corresponding contract is freed
*/ staticstruct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
size_t len, struct dma_slave_config *sconfig)
{ struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device); struct sun4i_dma_promise *promise; int ret;
promise = kzalloc(sizeof(*promise), GFP_NOWAIT); if (!promise) return NULL;
/* Source burst */
ret = priv->cfg->convert_burst(sconfig->src_maxburst); if (ret < 0) goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
ret = priv->cfg->convert_burst(sconfig->dst_maxburst); if (ret < 0) goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
/* Source bus width */
ret = convert_buswidth(sconfig->src_addr_width); if (ret < 0) goto fail;
priv->cfg->set_src_data_width(&promise->cfg, ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width); if (ret < 0) goto fail;
priv->cfg->set_dst_data_width(&promise->cfg, ret);
return promise;
fail:
kfree(promise); return NULL;
}
/*
 * NOTE(review): two interleaved truncated fragments follow — the tail of
 * generate_dma_contract() (likely list-head init and "return contract")
 * and the body of get_next_cyclic_promise() are missing from this chunk.
 * "staticstruct" looks like "static struct" with lost whitespace.
 */
/* * Generate a contract * * Contracts function as DMA descriptors. As our hardware does not support * linked lists, we need to implement SG via software. We use a contract * to hold all the pieces of the request and process them serially one * after another. Each piece is represented as a promise.
 */ staticstruct sun4i_dma_contract *generate_dma_contract(void)
{ struct sun4i_dma_contract *contract;
contract = kzalloc(sizeof(*contract), GFP_NOWAIT); if (!contract) return NULL;
/* * Get next promise on a cyclic transfer * * Cyclic contracts contain a series of promises which are executed on a * loop. This function returns the next promise from a cyclic contract, * so it can be programmed into the hardware.
 */ staticstruct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{ struct sun4i_dma_promise *promise;
/* NOTE(review): this line belongs to a prep function, not the one above */
contract = generate_dma_contract(); if (!contract) return NULL;
/*
 * NOTE(review): interior of a cyclic-transfer prep function — its
 * signature (providing chan, buf, len, period_len, dir, flags, sconfig,
 * vchan, endpoints and the loop variables) is outside this chunk.
 */
/* * We can only do the copy to bus aligned addresses, so * choose the best one so we get decent performance. We also * maximize the burst size for this same reason.
 */
sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
sconfig->src_maxburst = priv->cfg->max_burst;
sconfig->dst_maxburst = priv->cfg->max_burst;
/* * We will be using half done interrupts to make two periods * out of a promise, so we need to program the DMA engine less * often
 */
/* * The engine can interrupt on half-transfer, so we can use * this feature to program the engine half as often as if we * didn't use it (keep in mind the hardware doesn't support * linked lists). * * Say you have a set of periods (| marks the start/end, I for * interrupt, P for programming the engine to do a new * transfer), the easy but slow way would be to do * * |---|---|---|---| (periods / promises) * P I,P I,P I,P I * * Using half transfer interrupts you can do * * |-------|-------| (promises as configured on hw) * |---|---|---|---| (periods) * P I I,P I I * * Which requires half the engine programming for the same * functionality. * * This only works if two periods fit in a single promise. That will * always be the case for dedicated DMA, where the hardware has a much * larger maximum transfer size than advertised to clients.
 */ if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
/* one doubled-up promise covers two periods */
period_len *= 2;
contract->use_half_int = 1;
}
nr_periods = DIV_ROUND_UP(len, period_len); for (i = 0; i < nr_periods; i++) { /* Calculate the offset in the buffer and the length needed */
offset = i * period_len;
/* the final period may be shorter than the rest */
plength = min((len - offset), period_len); if (dir == DMA_MEM_TO_DEV)
src = buf + offset; else
dest = buf + offset;
/* Make the promise */ if (vchan->is_dedicated)
promise = generate_ddma_promise(chan, src, dest,
plength, sconfig); else
promise = generate_ndma_promise(chan, src, dest,
plength, sconfig, dir);
if (!promise) { /* TODO: should we free everything? */ return NULL;
}
promise->cfg |= endpoints;
/* Then add it to the contract */
list_add_tail(&promise->list, &contract->demands);
}
/* And add it to the vchan */ return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}
/*
 * NOTE(review): interior of a slave-sg prep function — the signature and
 * the for_each_sg() loop head (providing sg, srcaddr, dstaddr, vchan,
 * endpoints, contract, flags) are outside this chunk.
 */
/* * These are the magic DMA engine timings that keep SPI going. * I haven't seen any interface on DMAEngine to configure * timings, and so far they seem to work for everything we * support, so I've kept them here. I don't know if other * devices need different timings because, as usual, we only * have the "para" bitfield meanings, but no comment on what * the values should be when doing a certain operation :|
 */
para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;
/* And make a suitable promise */ if (vchan->is_dedicated)
promise = generate_ddma_promise(chan, srcaddr, dstaddr,
sg_dma_len(sg),
sconfig); else
promise = generate_ndma_promise(chan, srcaddr, dstaddr,
sg_dma_len(sg),
sconfig, dir);
if (!promise) return NULL; /* TODO: should we free everything? */
/* tie the promise to this channel's endpoints and timing parameters */
promise->cfg |= endpoints;
promise->para = para;
/* Then add it to the contract */
list_add_tail(&promise->list, &contract->demands);
}
/* * Once we've got all the promises ready, add the contract * to the pending list on the vchan
 */ return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}
/*
 * NOTE(review): three fragments from different callbacks — channel
 * teardown, residue calculation (tx_status), and issue_pending. Their
 * function headers and surrounding locking are outside this chunk.
 */
/* * Clearing the configuration register will halt the pchan. Interrupts * may still trigger, so don't forget to disable them.
 */ if (pchan) { if (pchan->is_dedicated)
writel(0, pchan->base + SUN4I_DDMA_CFG_REG); else
writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
/* disable both IRQ sources, then return the pchan to the pool */
set_pchan_interrupt(priv, pchan, 0, 0);
release_pchan(priv, pchan);
}
spin_lock_irqsave(&vchan->vc.lock, flags); /* Clear these so the vchan is usable again */
vchan->processing = NULL;
vchan->pchan = NULL;
spin_unlock_irqrestore(&vchan->vc.lock, flags);
/* * The hardware is configured to return the remaining byte * quantity. If possible, replace the first listed element's * full size with the actual remaining amount
 */
promise = list_first_entry_or_null(&contract->demands, struct sun4i_dma_promise, list); if (promise && pchan) {
/* swap the promise's nominal size for the hardware's live count */
bytes -= promise->len; if (pchan->is_dedicated)
bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG); else
bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
}
/* * If there are pending transactions for this vchan, push one of * them into the engine to get the ball rolling.
 */ if (vchan_issue_pending(&vchan->vc))
__execute_vchan_pending(priv, vchan);
/*
 * NOTE(review): interior of the interrupt handler — local declarations
 * (pendirq, disableirqs, free_room, allow_mitigation, irqs, bit, i),
 * the "handle_pending" label, and several closing braces (notably the
 * end of the non-cyclic else-branch and of this loop) are outside this
 * chunk or were lost during extraction.
 */
/* Each pchan owns two IRQ bits: even = half-done, odd = end */
for_each_set_bit(bit, &pendirq, 32) {
pchan = &pchans[bit >> 1];
vchan = pchan->vchan; if (!vchan) /* a terminated channel may still interrupt */ continue;
contract = vchan->contract;
/* * Disable the IRQ and free the pchan if it's an end * interrupt (odd bit)
 */ if (bit & 1) {
spin_lock(&vchan->vc.lock);
/* * Move the promise into the completed list now that * we're done with it
 */
list_move_tail(&vchan->processing->list,
&contract->completed_demands);
/* * Cyclic DMA transfers are special: * - There's always something we can dispatch * - We need to run the callback * - Latency is very important, as this is used by audio * We therefore just cycle through the list and dispatch * whatever we have here, reusing the pchan. There's * no need to run the thread after this. * * For non-cyclic transfers we need to look around, * so we can program some more work, or notify the * client that their transfers have been completed.
 */ if (contract->is_cyclic) {
promise = get_next_cyclic_promise(contract);
vchan->processing = promise;
/* keep the same pchan running on the next promise */
configure_pchan(pchan, promise);
vchan_cyclic_callback(&contract->vd);
} else {
/* detach the vchan from its pchan; work is done */
vchan->processing = NULL;
vchan->pchan = NULL;
/* Disable the IRQs for events we handled */
spin_lock(&priv->lock);
irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
writel_relaxed(irqs & ~disableirqs,
priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
spin_unlock(&priv->lock);
/* Writing 1 to the pending field will clear the pending interrupt */
writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
/* * If a pchan was freed, we may be able to schedule something else, * so have a look around
 */ if (free_room) { for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
vchan = &priv->vchans[i];
spin_lock(&vchan->vc.lock);
__execute_vchan_pending(priv, vchan);
spin_unlock(&vchan->vc.lock);
}
}
/* * Handle newer interrupts if some showed up, but only do it once * to avoid a too long a loop
 */ if (allow_mitigation) {
pendirq = readl_relaxed(priv->base +
SUN4I_DMA_IRQ_PENDING_STATUS_REG); if (pendirq) {
/* re-run the dispatch once; prevents an unbounded IRQ loop */
allow_mitigation = 0; goto handle_pending;
}
}
return IRQ_HANDLED;
}
/*
 * NOTE(review): probe is truncated in this chunk — the vchan-init loop
 * body, the dma_device/slave capability setup, and the final return are
 * missing. "staticint" is likely "static int" with lost whitespace.
 * Probe a sun4i DMA controller: map resources, set up pchans/vchans,
 * quiesce IRQs, then register with dmaengine and the OF DMA framework.
 */
staticint sun4i_dma_probe(struct platform_device *pdev)
{ struct sun4i_dma_dev *priv; int i, j, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM;
/* per-compatible channel/endpoint counts and SoC hooks */
priv->cfg = of_device_get_match_data(&pdev->dev); if (!priv->cfg) return -ENODEV;
priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0); if (priv->irq < 0) return priv->irq;
priv->clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(priv->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), "Couldn't start the clock\n");
if (priv->cfg->has_reset) {
priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL); if (IS_ERR(priv->rst)) return dev_err_probe(&pdev->dev, PTR_ERR(priv->rst), "Failed to get reset control\n");
}
/* * [0..priv->cfg->ndma_nr_max_channels) are normal pchans, and * [priv->cfg->ndma_nr_max_channels..priv->cfg->dma_nr_max_channels) are * dedicated ones
 */ for (i = 0; i < priv->cfg->ndma_nr_max_channels; i++)
priv->pchans[i].base = priv->base +
SUN4I_NDMA_CHANNEL_REG_BASE(i);
/* i continues from the loop above; j indexes dedicated register banks */
for (j = 0; i < priv->cfg->dma_nr_max_channels; i++, j++) {
priv->pchans[i].base = priv->base +
SUN4I_DDMA_CHANNEL_REG_BASE(j);
priv->pchans[i].is_dedicated = 1;
}
/* NOTE(review): loop body truncated here — vchan init is not visible */
for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) { struct sun4i_dma_vchan *vchan = &priv->vchans[i];
/* * Make sure the IRQs are all disabled and accounted for. The bootloader * likes to leave these dirty
 */
writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
0, dev_name(&pdev->dev), priv); if (ret) return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n");
ret = dmaenginem_async_device_register(&priv->slave); if (ret) return dev_err_probe(&pdev->dev, ret, "Failed to register DMA engine device\n");
ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
priv); if (ret) return dev_err_probe(&pdev->dev, ret, "Failed to register translation function\n");
/*
 * NOTE(review): the following German website boilerplate leaked into the
 * source during extraction and is not part of the driver. Original gist:
 * "The information on this website was compiled carefully to the best of
 * our knowledge, but no guarantee of completeness, correctness or quality
 * is given. Note: the syntax colouring and the measurement are still
 * experimental." It is preserved here as a comment so it cannot break
 * compilation; it should be removed from the real file.
 */