/*
 * Pop one word from the TX buffer for pushing into the
 * PUSHR register (TX FIFO).
 */
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
	u32 word = 0;

	/* No TX buffer (RX-only transfer): transmit zeroes. */
	if (dspi->tx)
		dspi->host_to_dev(dspi, &word);

	/* Account for the bytes consumed from the transfer. */
	dspi->len -= dspi->oper_word_size;

	return word;
}
/* Prepare one TX FIFO entry (txdata plus cmd) */ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
/*
 * NOTE(review): this function body appears truncated -- there is no
 * closing brace and no return statement for the host (non-target) path,
 * which presumably combines cmd and data into a single 32-bit PUSHR
 * word. Recover the missing tail from the upstream driver before
 * relying on this code.
 */
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
/* In target (peripheral) mode there is no command half: data only. */
if (spi_controller_is_target(dspi->ctlr)) return data;
/* Push one word to the RX buffer from the POPR register (RX FIFO) */
static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
	/* No RX buffer (TX-only transfer): discard the received word. */
	if (!dspi->rx)
		return;

	dspi->dev_to_host(dspi, rxdata);
}
/*
 * NOTE(review): two defects in this region. (1) "staticint" is a fused
 * token and will not compile. (2) The body is spliced: after the call to
 * dspi_setup_accel(), the lines using minscale/pbr/br/speed_hz/clkrate
 * belong to a different function (a baud-rate computation), none of
 * those names are declared here. Recover both original function bodies
 * from the upstream driver before use.
 */
staticint dspi_dma_xfer(struct fsl_dspi *dspi)
{ struct spi_message *message = dspi->cur_msg; struct device *dev = &dspi->pdev->dev; int ret = 0;
/* * dspi->len gets decremented by dspi_pop_tx_pushr in * dspi_next_xfer_dma_submit
*/ while (dspi->len) { /* Figure out operational bits-per-word for this chunk */
dspi_setup_accel(dspi);
/* NOTE(review): from here on the lines belong to another function. */
if (minscale == INT_MAX) {
pr_warn("Can not find valid baud rate,speed_hz is %d,clkrate is %ld, we use the max prescaler value.\n",
speed_hz, clkrate);
*pbr = ARRAY_SIZE(pbr_tbl) - 1;
*br = ARRAY_SIZE(brs) - 1;
}
}
staticvoid ns_delay_scale(char *psc, char *sc, int delay_ns, unsignedlong clkrate)
{ int scale_needed, scale, minscale = INT_MAX; int pscale_tbl[4] = {1, 3, 5, 7};
u32 remainder; int i, j;
scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
&remainder); if (remainder)
scale_needed++;
for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++) for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
scale = pscale_tbl[i] * (2 << j); if (scale >= scale_needed) { if (scale < minscale) {
minscale = scale;
*psc = i;
*sc = j;
} break;
}
}
if (minscale == INT_MAX) {
pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
delay_ns, clkrate);
*psc = ARRAY_SIZE(pscale_tbl) - 1;
*sc = SPI_CTAR_SCALE_BITS;
}
}
/* Write the 16-bit command half of a PUSHR entry (XSPI mode). */
static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
	/*
	 * The only time when the PCS doesn't need continuation after this word
	 * is when it's last. We need to look ahead, because we actually call
	 * dspi_pop_tx (the function that decrements dspi->len) _after_
	 * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
	 * word is enough. If there's more to transmit than that,
	 * dspi_xspi_write will know to split the FIFO writes in 2, and
	 * generate a new PUSHR command with the final word that will have PCS
	 * deasserted (not continued) here.
	 */
	if (dspi->len > dspi->oper_word_size)
		cmd |= SPI_PUSHR_CMD_CONT;

	regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}
/*
 * NOTE(review): multiple defects in this region. (1) "staticvoid" and
 * "elseif" are fused tokens and will not compile. (2) The body is
 * spliced: after the EOQ handling, the lines referencing xfer, odd, the
 * no_accel label and oper_bits_per_word selection belong to a different
 * function (accel setup); none of those names are declared here, and
 * this function never closes its own logic before the foreign code
 * starts. Recover both original bodies from the upstream driver.
 */
staticvoid dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{ int num_bytes = num_words * dspi->oper_word_size;
u16 tx_cmd = dspi->tx_cmd;
/* * If the PCS needs to de-assert (i.e. we're at the end of the buffer * and cs_change does not want the PCS to stay on), then we need a new * PUSHR command, since this one (for the body of the buffer) * necessarily has the CONT bit set. * So send one word less during this go, to force a split and a command * with a single word next time, when CONT will be unset.
*/ if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
tx_cmd |= SPI_PUSHR_CMD_EOQ;
/* NOTE(review): from here on the lines belong to another function. */
/* * No accel for DMA transfers or frames not multiples of 8 bits at the * moment.
*/ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE ||
xfer->bits_per_word % 8) goto no_accel;
if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
dspi->oper_bits_per_word = 16;
} elseif (odd && dspi->len <= dspi->devtype_data->fifo_size) {
dspi->oper_bits_per_word = 8;
} else { /* Start off with maximum supported by hardware */
dspi->oper_bits_per_word = 32;
/* * And go down only if the buffer can't be sent with * words this big
*/ do { if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8)) break;
dspi->oper_bits_per_word /= 2;
} while (dspi->oper_bits_per_word > 8);
}
/* * Update CTAR here (code is common for XSPI and DMA modes). * We will update CTARE in the portion specific to XSPI, when we * also know the preload value (DTCP).
*/
regmap_write(dspi->regmap, SPI_CTAR(0),
dspi->cur_chip->ctar_val |
SPI_FRAME_BITS(dspi->oper_bits_per_word));
}
/*
 * NOTE(review): orphaned fragment -- no enclosing function header is
 * visible. The first half (FIFO sizing / actual_length accounting) looks
 * like the interior of a FIFO write routine; the second half (tx_cmd
 * construction from the transfer list) looks like per-transfer command
 * setup from the message loop. num_fifo_entries, num_words, num_bytes,
 * msg, xfer, spi, transfer and status are not declared in this view.
 */
/* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */ if (dspi->oper_word_size == 4)
num_fifo_entries /= 2;
/* * Integer division intentionally trims off odd (or non-multiple of 4) * numbers of bytes at the end of the buffer, which will be sent next * time using a smaller oper_word_size.
*/
num_words = dspi->len / dspi->oper_word_size; if (num_words > num_fifo_entries)
num_words = num_fifo_entries;
/* Update total number of bytes that were transferred */
num_bytes = num_words * dspi->oper_word_size;
msg->actual_length += num_bytes;
dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);
/* * Update shared variable for use in the next interrupt (both in * dspi_fifo_read and in dspi_fifo_write).
*/
dspi->words_in_flight = num_words;
dspi_xspi_fifo_write(dspi, num_words); /* * Everything after this point is in a potential race with the next * interrupt, so we must never use dspi->words_in_flight again since it * might already be modified by the next dspi_fifo_write.
*/
/* NOTE(review): the per-transfer command setup below appears to belong
 * to a different function than the FIFO code above. */
/* Prepare command word for CMD FIFO */
dspi->tx_cmd = SPI_PUSHR_CMD_CTAS(0); if (!spi_get_csgpiod(spi, 0))
dspi->tx_cmd |= SPI_PUSHR_CMD_PCS(spi_get_chipselect(spi, 0));
if (list_is_last(&dspi->cur_transfer->transfer_list,
&dspi->cur_msg->transfers)) { /* Leave PCS activated after last transfer when * cs_change is set.
*/ if (transfer->cs_change)
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
} else { /* Keep PCS active between transfers in same message * when cs_change is not set, and de-activate PCS * between transfers in the same message when * cs_change is set.
*/ if (!transfer->cs_change)
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
/*
 * NOTE(review): orphaned fragment of a message-transfer loop -- no
 * enclosing function header is visible. status, transfer, spi, cs,
 * cs_change and val are not declared in this view, and the loop this
 * "break" belongs to starts outside the fragment.
 */
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
status = dspi_dma_xfer(dspi);
} else { /* * Reinitialize the completion before transferring data * to avoid the case where it might remain in the done * state due to a spurious interrupt from a previous * transfer. This could falsely signal that the current * transfer has completed.
*/ if (dspi->irq)
reinit_completion(&dspi->xfer_done);
dspi_fifo_write(dspi);
if (dspi->irq) {
wait_for_completion(&dspi->xfer_done);
} else { do {
status = dspi_poll(dspi);
} while (status == -EINPROGRESS);
}
} if (status) break;
spi_transfer_delay_exec(transfer);
/* De-assert chip select unless the command asked for continuation. */
if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT))
dspi_deassert_cs(spi, &cs);
}
/* Halt the module and busy-wait until the TX/RX state machine idles. */
if (status || !cs_change) { /* Put DSPI in stop mode */
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_HALT, SPI_MCR_HALT); while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 &&
val & SPI_SR_TXRXS)
;
}
/*
 * NOTE(review): orphaned fragment of a per-device setup routine -- no
 * enclosing function header is visible. chip, pdata, val, cs_sck_delay,
 * sck_cs_delay and quarter_period_ns are not declared in this view.
 */
/* Only alloc on first setup */
chip = spi_get_ctldata(spi); if (chip == NULL) {
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) return -ENOMEM;
}
/* Platform data takes precedence; otherwise use the SPI core's
 * cs_setup/cs_hold delays, falling back to device-tree properties. */
pdata = dev_get_platdata(&dspi->pdev->dev);
if (!pdata) {
val = spi_delay_to_ns(&spi->cs_setup, NULL);
cs_sck_delay = val >= 0 ? val : 0; if (!cs_sck_delay)
of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
&cs_sck_delay);
val = spi_delay_to_ns(&spi->cs_hold, NULL);
sck_cs_delay = val >= 0 ? val : 0; if (!sck_cs_delay)
of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
&sck_cs_delay);
} else {
cs_sck_delay = pdata->cs_sck_delay;
sck_cs_delay = pdata->sck_cs_delay;
}
/* Since tCSC and tASC apply to continuous transfers too, avoid SCK * glitches of half a cycle by never allowing tCSC + tASC to go below * half a SCK period.
*/ if (cs_sck_delay < quarter_period_ns)
cs_sck_delay = quarter_period_ns; if (sck_cs_delay < quarter_period_ns)
sck_cs_delay = quarter_period_ns;
/*
 * NOTE(review): orphaned fragment -- looks like the body of an abort or
 * shutdown path (terminate DMA, then flush FIFOs); no enclosing function
 * header is visible.
 */
/* * Terminate all pending DMA transactions for the SPI working * in TARGET mode.
*/ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
dmaengine_terminate_sync(dspi->dma->chan_rx);
dmaengine_terminate_sync(dspi->dma->chan_tx);
}
/* Clear the internal DSPI RX and TX FIFO buffers */
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
/*
 * NOTE(review): orphaned fragment of a platform-driver probe routine --
 * no enclosing function header is visible. pdev, res, base, ret, dspi
 * and the out_ctlr_put / out_free_irq cleanup labels are declared or
 * defined outside this view.
 */
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) {
ret = PTR_ERR(base); goto out_ctlr_put;
}
dspi->regmap = devm_regmap_init_mmio(&pdev->dev, base,
dspi->devtype_data->regmap); if (IS_ERR(dspi->regmap)) {
dev_err(&pdev->dev, "failed to init regmap: %ld\n",
PTR_ERR(dspi->regmap));
ret = PTR_ERR(dspi->regmap); goto out_ctlr_put;
}
/* XSPI mode uses a second regmap window over the PUSHR register so the
 * 16-bit cmd and data halves can be written separately. */
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
dspi->regmap_pushr = devm_regmap_init_mmio(
&pdev->dev, base + SPI_PUSHR,
&dspi_regmap_config[DSPI_PUSHR]); if (IS_ERR(dspi->regmap_pushr)) {
dev_err(&pdev->dev, "failed to init pushr regmap: %ld\n",
PTR_ERR(dspi->regmap_pushr));
ret = PTR_ERR(dspi->regmap_pushr); goto out_ctlr_put;
}
}
dspi->clk = devm_clk_get_enabled(&pdev->dev, "dspi"); if (IS_ERR(dspi->clk)) {
ret = PTR_ERR(dspi->clk);
dev_err(&pdev->dev, "unable to get clock\n"); goto out_ctlr_put;
}
ret = dspi_init(dspi); if (ret) goto out_ctlr_put;
/* Without a valid IRQ, fall through to polled operation. */
dspi->irq = platform_get_irq(pdev, 0); if (dspi->irq <= 0) {
dev_info(&pdev->dev, "can't get platform irq, using poll mode\n");
dspi->irq = 0; goto poll_mode;
}
init_completion(&dspi->xfer_done);
ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
IRQF_SHARED, pdev->name, dspi); if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); goto out_ctlr_put;
}
poll_mode:
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start); if (ret < 0) {
dev_err(&pdev->dev, "can't get dma channels\n"); goto out_free_irq;
}
}
/*
 * NOTE(review): stray website-disclaimer text (German) was appended to
 * this file by whatever tool extracted it; wrapped in a comment so it
 * cannot break compilation. It has nothing to do with the driver:
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch
 * Richtigkeit noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */