/**
 * struct mtk_cqdma_vdesc - The struct holding info describing virtual
 *                          descriptor (CVD)
 * @vd:                     An instance for struct virt_dma_desc
 * @len:                    The total data size device wants to move
 * @residue:                The remaining data size device will move
 * @dest:                   The destination address device wants to move to
 * @src:                    The source address device wants to move from
 * @ch:                     The pointer to the corresponding dma channel
 * @node:                   The list_head struct to build link-list for VDs
 * @parent:                 The pointer to the parent CVD
 */
struct mtk_cqdma_vdesc {
        struct virt_dma_desc vd;
        size_t len;
        size_t residue;
        dma_addr_t dest;
        dma_addr_t src;
        struct dma_chan *ch;
        /*
         * NOTE(review): the @node and @parent members documented above are
         * used by later code (cvd->node, cvd->parent) but are missing from
         * this fragment; the struct is truncated here (no closing brace
         * visible) — confirm against the full source.
         */
/**
 * struct mtk_cqdma_pchan - The struct holding info describing physical
 *                          channel (PC)
 * @queue:                  Queue for the VDs issued to this PC
 * @base:                   The mapped register I/O base of this PC
 * @irq:                    The IRQ that this PC is using
 * @refcnt:                 Track how many VCs are using this PC
 * @tasklet:                Tasklet for this PC
 * @lock:                   Lock protecting against multiple VCs accessing
 *                          this PC concurrently
 */
struct mtk_cqdma_pchan {
        struct list_head queue;
        void __iomem *base;
        u32 irq;

        refcount_t refcnt;

        struct tasklet_struct tasklet;

        /* lock to protect PC */
        spinlock_t lock;
};
/**
 * struct mtk_cqdma_vchan - The struct holding info describing virtual
 *                          channel (VC)
 * @vc:                     An instance for struct virt_dma_chan
 * @pc:                     The pointer to the underlying PC
 * @issue_completion:       The wait for all issued descriptors completed
 * @issue_synchronize:      Bool indicating channel synchronization starts
 */
struct mtk_cqdma_vchan {
        struct virt_dma_chan vc;
        struct mtk_cqdma_pchan *pc;
        struct completion issue_completion;
        bool issue_synchronize;
};
/**
 * struct mtk_cqdma_device - The struct holding info describing CQDMA
 *                           device
 * @ddev:                    An instance for struct dma_device
 * @clk:                     The clock that device internal is using
 * @dma_requests:            The number of VCs the device supports
 * @dma_channels:            The number of PCs the device supports
 * @vc:                      The pointer to all available VCs
 * @pc:                      The pointer to all the underlying PCs
 */
struct mtk_cqdma_device {
        struct dma_device ddev;
        struct clk *clk;
        /*
         * NOTE(review): the @dma_requests, @dma_channels, @vc and @pc
         * members documented above are referenced by later code but are
         * missing from this fragment; the struct is truncated here (no
         * closing brace visible) — confirm against the full source.
         */
/*
 * Kick off the transaction described by @cvd on physical channel @pc:
 * wait for the engine to drain the previous transaction, then warm-reset
 * it before programming the new one.
 *
 * Fix: the original read "staticvoid" (fused tokens) — invalid C.
 */
static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
                            struct mtk_cqdma_vdesc *cvd)
{
        /* wait for the previous transaction done */
        if (mtk_cqdma_poll_engine_done(pc, true) < 0)
                dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)),
                        "cqdma wait transaction timeout\n");

        /* warm reset the dma engine for the new transaction */
        mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT);
        if (mtk_cqdma_poll_engine_done(pc, true) < 0)
                dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)),
                        "cqdma warm reset timeout\n");
        /*
         * NOTE(review): fragment is truncated here — the register setup
         * (src/dest/len) and the engine trigger that presumably follow,
         * plus the closing brace, are not visible in this view.
         */
        /*
         * NOTE(review): interior fragment of the issue-pending routine; the
         * enclosing function header and the declarations of @vd, @vd2,
         * @cvc, @pc, @cvd and @trigger_engine are outside this view.
         */
        /* move each issued VD from the VC onto the PC's hardware queue */
        list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) {
                /* need to trigger dma engine if PC's queue is empty */
                if (list_empty(&pc->queue))
                        trigger_engine = true;

                cvd = to_cqdma_vdesc(vd);

                /* add VD into PC's queue */
                list_add_tail(&cvd->node, &pc->queue);

                /* start the dma engine */
                if (trigger_engine)
                        mtk_cqdma_start(pc, cvd);

                /* remove VD from list desc_issued */
                list_del(&vd->node);
        }
}
/* * return true if this VC is active, * meaning that there are VDs under processing by the PC
*/ staticbool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc)
{ struct mtk_cqdma_vdesc *cvd;
list_for_each_entry(cvd, &cvc->pc->queue, node) if (cvc == to_cqdma_vchan(cvd->ch)) returntrue;
returnfalse;
}
/* * return the pointer of the CVD that is just consumed by the PC
*/ staticstruct mtk_cqdma_vdesc
*mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc)
{ struct mtk_cqdma_vchan *cvc; struct mtk_cqdma_vdesc *cvd, *ret = NULL;
/* consume a CVD from PC's queue */
cvd = list_first_entry_or_null(&pc->queue, struct mtk_cqdma_vdesc, node); if (unlikely(!cvd || !cvd->parent)) return NULL;
cvc = to_cqdma_vchan(cvd->ch);
ret = cvd;
/* update residue of the parent CVD */
cvd->parent->residue -= cvd->len;
/* delete CVD from PC's queue */
list_del(&cvd->node);
spin_lock(&cvc->vc.lock);
/* check whether all the child CVDs completed */ if (!cvd->parent->residue) { /* add the parent VD into list desc_completed */
vchan_cookie_complete(&cvd->parent->vd);
/* setup completion if this VC is under synchronization */ if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) {
complete(&cvc->issue_completion);
cvc->issue_synchronize = false;
}
}
spin_unlock(&cvc->vc.lock);
/* start transaction for next CVD in the queue */
cvd = list_first_entry_or_null(&pc->queue, struct mtk_cqdma_vdesc, node); if (cvd)
mtk_cqdma_start(pc, cvd);
        /*
         * NOTE(review): interior fragment of the CQDMA interrupt handler;
         * the enclosing function header and the declarations of @i,
         * @schedule_tasklet, @ret and @cqdma are outside this view.
         */
        /* clear interrupt flags for each PC */
        for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) {
                spin_lock(&cqdma->pc[i]->lock);
                if (mtk_dma_read(cqdma->pc[i],
                                 MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) {
                        /* clear interrupt */
                        mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
                                    MTK_CQDMA_INT_FLAG_BIT);

                        schedule_tasklet = true;
                        ret = IRQ_HANDLED;
                }
                spin_unlock(&cqdma->pc[i]->lock);

                if (schedule_tasklet) {
                        /* disable interrupt */
                        disable_irq_nosync(cqdma->pc[i]->irq);

                        /* schedule the tasklet to handle the transactions */
                        tasklet_schedule(&cqdma->pc[i]->tasklet);
                }
        }
        /*
         * In the case that the transaction length is larger than the
         * DMA engine supports, a single memcpy transaction needs
         * to be separated into several DMA transactions.
         * Each DMA transaction would be described by a CVD,
         * and the first one is referred to as the parent CVD,
         * while the others are child CVDs.
         * The parent CVD's tx descriptor is the only tx descriptor
         * returned to the DMA user, and it should not be completed
         * until all the child CVDs completed.
         */
        /*
         * NOTE(review): interior fragment of the memcpy-prep routine; the
         * declarations of @nr_vd, @len, @cvd and @i are outside this view,
         * and the loop below is truncated (closing braces not visible).
         */
        nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN);
        /* array of per-chunk CVD pointers */
        cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT);
        if (!cvd)
                return NULL;

        for (i = 0; i < nr_vd; ++i) {
                cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT);
                if (!cvd[i]) {
                        /* unwind: free the CVDs already allocated */
                        for (; i > 0; --i)
                                kfree(cvd[i - 1]);
                        return NULL;
                }
        /*
         * set desc_allocated, desc_submitted,
         * and desc_issued as the candidates to be freed
         */
        /*
         * NOTE(review): interior fragment — @vc, @flags and @head are
         * declared outside this view; presumably part of terminate_all,
         * which frees the spliced-off descriptors afterwards.
         */
        spin_lock_irqsave(&vc->lock, flags);
        list_splice_tail_init(&vc->desc_allocated, &head);
        list_splice_tail_init(&vc->desc_submitted, &head);
        list_splice_tail_init(&vc->desc_issued, &head);
        spin_unlock_irqrestore(&vc->lock, flags);
        /*
         * NOTE(review): interior fragment of channel synchronization; @cvc,
         * @pc_flags, @vc_flags and @sync_needed are declared outside this
         * view, as is the unlock/wait tail of the routine.
         */
        /* acquire PC's lock first due to lock dependency in dma ISR */
        spin_lock_irqsave(&cvc->pc->lock, pc_flags);
        spin_lock_irqsave(&cvc->vc.lock, vc_flags);

        /* synchronization is required if this VC is active */
        if (mtk_cqdma_is_vchan_active(cvc)) {
                cvc->issue_synchronize = true;
                sync_needed = true;
        }
        /*
         * NOTE(review): interior fragment of channel-resource allocation;
         * @i, @refcnt, @min_refcnt, @pc, @flags and @cqdma are declared
         * outside this view (pc presumably NULL-initialized — confirm),
         * and the unlock/tail of the routine is not visible.
         */
        /* allocate PC with the minimum refcount */
        for (i = 0; i < cqdma->dma_channels; ++i) {
                refcnt = refcount_read(&cqdma->pc[i]->refcnt);
                if (refcnt < min_refcnt) {
                        pc = cqdma->pc[i];
                        min_refcnt = refcnt;
                }
        }

        if (!pc)
                return -ENOSPC;

        spin_lock_irqsave(&pc->lock, flags);

        if (!refcount_read(&pc->refcnt)) {
                /* allocate PC when the refcount is zero */
                mtk_cqdma_hard_reset(pc);

                /* enable interrupt for this PC */
                mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);

                /*
                 * refcount_inc would complain increment on 0; use-after-free.
                 * Thus, we need to explicitly set it as 1 initially.
                 */
                refcount_set(&pc->refcnt, 1);
        } else {
                refcount_inc(&pc->refcnt);
        }
        /*
         * NOTE(review): interior fragment of channel-resource teardown;
         * @c, @cvc, @flags and the enclosing function header are outside
         * this view, as is the matching spin_unlock_irqrestore() tail.
         */
        /* free all descriptors in all lists on the VC */
        mtk_cqdma_terminate_all(c);

        spin_lock_irqsave(&cvc->pc->lock, flags);

        /* PC is not freed until there is no VC mapped to it */
        if (refcount_dec_and_test(&cvc->pc->refcnt)) {
                /* start the flush operation and stop the engine */
                mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);

                /* wait for the completion of flush operation */
                if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
                        dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");

                /* clear the flush bit and interrupt flag */
                mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
                mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG,
                            MTK_CQDMA_INT_FLAG_BIT);

                /* disable interrupt for this PC */
                mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
        }
        /*
         * NOTE(review): interior fragment of the probe routine; @err,
         * @cqdma, @i and @flags are declared outside this view.
         */
        if (err) {
                /* undo the runtime-PM setup on registration failure */
                pm_runtime_put_sync(cqdma2dev(cqdma));
                pm_runtime_disable(cqdma2dev(cqdma));
                return err;
        }

        /* reset all PCs */
        for (i = 0; i < cqdma->dma_channels; ++i) {
                spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
                if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) {
                        dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
                        spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

        /*
         * NOTE(review): the braces opened just above are never closed in
         * this view — the text below looks like a second, overlapping copy
         * of the same reset loop (likely extraction garbling); confirm
         * against the full source before building.
         */
        /* reset all PCs */
        for (i = 0; i < cqdma->dma_channels; ++i) {
                spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
                if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0)
                        dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n");
                spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);
        }
        /*
         * NOTE(review): interior fragment of the probe routine; @pdev,
         * @cqdma and @i are declared outside this view, and the PC-init
         * loop at the bottom is truncated (body and closing brace missing).
         */
        /* fall back to the driver default when DT omits dma-requests */
        if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
                                                      "dma-requests",
                                                      &cqdma->dma_requests)) {
                dev_info(&pdev->dev, "Using %u as missing dma-requests property\n",
                         MTK_CQDMA_NR_VCHANS);
                cqdma->dma_requests = MTK_CQDMA_NR_VCHANS;
        }

        /* fall back to the driver default when DT omits dma-channels */
        if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
                                                      "dma-channels",
                                                      &cqdma->dma_channels)) {
                dev_info(&pdev->dev, "Using %u as missing dma-channels property\n",
                         MTK_CQDMA_NR_PCHANS);
                cqdma->dma_channels = MTK_CQDMA_NR_PCHANS;
        }

        /* device-managed array of per-PC pointers */
        cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels,
                                 sizeof(*cqdma->pc), GFP_KERNEL);
        if (!cqdma->pc)
                return -ENOMEM;

        /* initialization for PCs */
        for (i = 0; i < cqdma->dma_channels; ++i) {
                cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
                                            sizeof(**cqdma->pc), GFP_KERNEL);
                if (!cqdma->pc[i])
                        return -ENOMEM;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.