/*
 * Number of entries in sgt returned from spi framework that
 * will be supported. Can be modified as required.
 * In practice, given max_dma_len is 64KB, the number of
 * entries is not expected to exceed 1.
 */
#define QSPI_MAX_SG 5
/* free cmd descriptors if they are around (DMA mode) */
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	/* Pool entries returned; mark the chain empty so nothing double-frees */
	ctrl->n_cmd_desc = 0;

	spin_unlock_irqrestore(&ctrl->lock, flags);
}
/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
	ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
	if (ret) {
		dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
		return ret;
	}

	/*
	 * Set BW quota for CPU.
	 * We don't have explicit peak requirement so keep it equal to avg_bw.
	 */
	avg_bw_cpu = Bps_to_icc(speed_hz);
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, avg_bw_cpu, avg_bw_cpu);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}
	/*
	 * First pass: validate every scatterlist entry against the
	 * controller's DMA constraints before allocating anything, so a
	 * PIO fallback (-EAGAIN) needs no cleanup.
	 */
	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);
		if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
			dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
			return -EAGAIN;
		}
		/*
		 * When reading with DMA the controller writes to memory 1 word
		 * at a time. If the length isn't a multiple of 4 bytes then
		 * the controller can clobber the things later in memory.
		 * Fallback to PIO to be safe.
		 */
		if (ctrl->xfer.dir == QSPI_READ && (dma_len_sg & 0x03)) {
			dev_warn_once(ctrl->dev, "fallback to PIO for read of size %#010x\n",
				      dma_len_sg);
			return -EAGAIN;
		}
	}

	/* Second pass: one command descriptor per scatterlist entry */
	for (i = 0; i < sgt->nents; i++) {
		dma_ptr_sg = sg_dma_address(sgt->sgl + i);
		dma_len_sg = sg_dma_len(sgt->sgl + i);

		ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	/* Undo any descriptors allocated before the failure */
	for (i = 0; i < ctrl->n_cmd_desc; i++)
		dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
			      ctrl->dma_cmd_desc[i]);
	ctrl->n_cmd_desc = 0;
	return ret;
}
/* We are half duplex, so either rx or tx will be set */ if (xfer->rx_buf) {
ctrl->xfer.dir = QSPI_READ;
ctrl->xfer.buswidth = xfer->rx_nbits;
ctrl->xfer.rx_buf = xfer->rx_buf;
} else {
ctrl->xfer.dir = QSPI_WRITE;
ctrl->xfer.buswidth = xfer->tx_nbits;
ctrl->xfer.tx_buf = xfer->tx_buf;
}
ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
&host->cur_msg->transfers);
ctrl->xfer.rem_bytes = xfer->len;
if (xfer->rx_sg.nents || xfer->tx_sg.nents) { /* do DMA transfer */ if (!(mstr_cfg & DMA_ENABLE)) {
mstr_cfg |= DMA_ENABLE;
writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
}
ret = qcom_qspi_setup_dma_desc(ctrl, xfer); if (ret != -EAGAIN) { if (!ret) {
dma_wmb();
qcom_qspi_dma_xfer(ctrl);
} gotoexit;
}
dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO\n");
ret = 0; /* We'll retry w/ PIO */
}
	if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
		/* Process the last 1-3 bytes */
		wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
		ctrl->xfer.rem_bytes -= wr_size;

		byte_buf = xfer_buf;
		/* Tail bytes go out one at a time through the 1-byte FIFO port */
		while (wr_size--)
			writel(*byte_buf++,
			       ctrl->base + PIO_DATAOUT_1B);
		ctrl->xfer.tx_buf = byte_buf;
	} else {
		/*
		 * Process all the whole words; to keep things simple we'll
		 * just wait for the next interrupt to handle the last 1-3
		 * bytes if we don't have an even number of words.
		 */
		rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
		wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;
	/* PIO mode handling */
	if (ctrl->xfer.dir == QSPI_WRITE) {
		if (int_status & WR_FIFO_EMPTY)
			ret = pio_write(ctrl);
	} else {
		if (int_status & RESP_FIFO_RDY)
			ret = pio_read(ctrl);
	}

	/* Error IRQs are logged but still acknowledged as handled */
	if (int_status & QSPI_ERR_IRQS) {
		if (int_status & RESP_FIFO_UNDERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
		if (int_status & WR_FIFO_OVERRUN)
			dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
		if (int_status & HRESP_FROM_NOC_ERR)
			dev_err(ctrl->dev, "IRQ error: NOC response error\n");
		ret = IRQ_HANDLED;
	}

	if (!ctrl->xfer.rem_bytes) {
		/* PIO transfer complete: mask all interrupts and finalize */
		writel(0, ctrl->base + MSTR_INT_EN);
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	/* DMA mode handling */
	if (int_status & DMA_CHAIN_DONE) {
		int i;

		/* Chain finished; return the command descriptors to the pool */
		for (i = 0; i < ctrl->n_cmd_desc; i++)
			dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
				      ctrl->dma_cmd_desc[i]);
		ctrl->n_cmd_desc = 0;

		ret = IRQ_HANDLED;
		spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
	}

	spin_unlock(&ctrl->lock);
	return ret;
}
/*
 * qcom_qspi_adjust_op_size() - shrink a spi-mem op to a size DMA can handle
 *
 * BUGFIX: the declaration was the fused token "staticint"; restored to
 * "static int". The missing "return 0;" and closing brace (lost at the
 * chunk boundary before the probe code) are also restored.
 *
 * Returns 0 always; may reduce op->data.nbytes in place.
 */
static int qcom_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	/*
	 * If qcom_qspi_can_dma() is going to return false we don't need to
	 * adjust anything.
	 */
	if (op->data.nbytes <= QSPI_MAX_BYTES_FIFO)
		return 0;

	/*
	 * When reading, the transfer needs to be a multiple of 4 bytes so
	 * shrink the transfer if that's not true. The caller will then do a
	 * second transfer to finish things up.
	 */
	if (op->data.dir == SPI_MEM_DATA_IN && (op->data.nbytes & 0x3))
		op->data.nbytes &= ~0x3;

	return 0;
}
	/* Core + interface clocks, fetched as a bulk pair */
	ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS, sizeof(*ctrl->clks), GFP_KERNEL);
	if (!ctrl->clks)
		return -ENOMEM;

	ctrl->clks[QSPI_CLK_CORE].id = "core";
	ctrl->clks[QSPI_CLK_IFACE].id = "iface";
	ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
	if (ret)
		return ret;

	ctrl->icc_path_cpu_to_qspi = devm_of_icc_get(dev, "qspi-config");
	if (IS_ERR(ctrl->icc_path_cpu_to_qspi))
		return dev_err_probe(dev, PTR_ERR(ctrl->icc_path_cpu_to_qspi),
				     "Failed to get cpu path\n");

	/* Set BW vote for register access */
	ret = icc_set_bw(ctrl->icc_path_cpu_to_qspi, Bps_to_icc(1000),
			 Bps_to_icc(1000));
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC BW voting failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	/* Drop the path again for now; presumably re-enabled at runtime — TODO confirm */
	ret = icc_disable(ctrl->icc_path_cpu_to_qspi);
	if (ret) {
		dev_err(ctrl->dev, "%s: ICC disable failed for cpu: %d\n",
			__func__, ret);
		return ret;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	/* platform_get_irq() returned the IRQ number in ret on success */
	ret = devm_request_irq(dev, ret, qcom_qspi_irq, 0, dev_name(dev), ctrl);
	if (ret) {
		dev_err(dev, "Failed to request irq %d\n", ret);
		return ret;
	}

	/* Controller DMA addressing is limited to 32 bits */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "could not set DMA mask\n");

	ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = qcom_qspi_alloc_dma(ctrl);
	if (ret)
		return ret;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.