/*
 * Use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics would also consider wordsize and
 * bitrate.
 */
#define DMA_MIN_BYTES 160
/*
 * Used for context save and restore; structure members must be updated
 * whenever the corresponding registers are modified.
 */
struct omap2_mcspi_regs {
	u32 modulctrl;		/* shadow of the MODULCTRL register (see set_mode path) */
	u32 wakeupenable;	/* presumably shadows the wakeup-enable register — confirm */
	struct list_head cs;	/* list of per-chip-select shadow state (omap2_mcspi_cs.node) */
};
/* Per-controller driver state. */
struct omap2_mcspi {
	struct completion	txdone;		/* initialized in probe next to the IRQ setup */
	struct spi_controller	*ctlr;
	/* Virtual base address of the controller */
	void __iomem		*base;
	unsigned long		phys;		/* fixed paste artifact: was "unsignedlong" */
	/* SPI1 has 4 channels, while SPI2 has 2 */
	struct omap2_mcspi_dma	*dma_channels;
	struct device		*dev;
	struct omap2_mcspi_regs	ctx;		/* register shadows for context save/restore */
	struct clk		*ref_clk;	/* optional; NULL means use OMAP2_MCSPI_MAX_FREQ */
	int			fifo_depth;
	bool			target_aborted;
	unsigned int		pin_dir:1;	/* fixed paste artifact: was "unsignedint" */
	size_t			max_xfer_len;
	u32			ref_clk_hz;	/* ref clock rate, or OMAP2_MCSPI_MAX_FREQ fallback */
	bool			use_multi_mode;	/* true when the whole message allows multi-channel mode */
	bool			last_msg_kept_cs; /* previous message left CS asserted (cs_change) */
};
/* Per-chip-select (per-channel) state, linked into omap2_mcspi_regs.cs. */
struct omap2_mcspi_cs {
	void __iomem		*base;	/* presumably this channel's register window — confirm */
	unsigned long		phys;	/* fixed paste artifact: was "unsignedlong" */
	int			word_len;
	u16			mode;	/* spi->mode the hardware was last configured for */
	struct list_head	node;	/* linkage into omap2_mcspi_regs.cs */
	/* Context save and restore shadow registers */
	u32			chconf0, chctrl0;
};
/*
 * The controller handles the inverted chip selects
 * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
 * the inversion from the core spi_set_cs function.
 *
 * NOTE(review): excerpt from the middle of a chip-select hook; the
 * enclosing function header and the close of the
 * "if (spi->controller_state)" block lie outside the visible region.
 */
if (spi->mode & SPI_CS_HIGH)
	enable = !enable;

if (spi->controller_state) {
	int err = pm_runtime_resume_and_get(mcspi->dev);

	if (err < 0) {
		dev_err(mcspi->dev, "failed to get sync: %d\n", err);
		return;
	}

	l = mcspi_cached_chconf0(spi);

	/* Only enable chip select manually if single mode is used */
	if (mcspi->use_multi_mode) {
		/* In multi mode the FORCE bit must stay cleared */
		l &= ~OMAP2_MCSPI_CHCONF_FORCE;
	} else {
		/* Drive CS manually via the FORCE bit */
		if (enable)
			l &= ~OMAP2_MCSPI_CHCONF_FORCE;
		else
			l |= OMAP2_MCSPI_CHCONF_FORCE;
	}
/*
 * Choose host or target mode.
 *
 * NOTE(review): excerpt from the middle of a mode-setup helper; the
 * function header is outside the visible region.
 */
l = mcspi_read_reg(ctlr, OMAP2_MCSPI_MODULCTRL);
l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);	/* always leave system-test mode */
if (spi_controller_is_target(ctlr)) {
	l |= (OMAP2_MCSPI_MODULCTRL_MS);
} else {
	l &= ~(OMAP2_MCSPI_MODULCTRL_MS);

	/* Enable single mode if needed */
	if (mcspi->use_multi_mode)
		l &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
	else
		l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
}
mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, l);
/*
 * In the "End-of-Transfer Procedure" section for DMA RX in OMAP35x TRM
 * it mentions reducing DMA transfer length by one element in host
 * normal mode.
 *
 * NOTE(review): excerpt from the middle of a DMA-RX helper; the function
 * header and several connecting statements are outside the visible region.
 */
if (mcspi->fifo_depth == 0)
	transfer_reduction = es;

word_len = cs->word_len;
l = mcspi_cached_chconf0(spi);

/*
 * Reduce DMA transfer length by one more if McSPI is
 * configured in turbo mode.
 */
if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
	transfer_reduction += es;

if (transfer_reduction) {
	/* Split sgl into two. The second sgl won't be used. */
	sizes[0] = count - transfer_reduction;
	sizes[1] = transfer_reduction;
	nb_sizes = 2;
} else {
	/*
	 * Don't bother splitting the sgl. This essentially
	 * clones the original sgl.
	 */
	sizes[0] = count;
	nb_sizes = 1;
}

ret = sg_split(xfer->rx_sg.sgl, xfer->rx_sg.nents, 0, nb_sizes,
	       sizes, sg_out, out_mapped_nents, GFP_KERNEL);

/*
 * NOTE(review): in this excerpt the sg_split() result is overwritten
 * below without being checked — the error check appears to have been
 * elided; confirm against the full source.
 */
ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
if (ret || mcspi->target_aborted) {
	/* Tear down the DMA and drop the pending DMA request line */
	dmaengine_terminate_sync(mcspi_dma->dma_rx);
	omap2_mcspi_set_dma_req(spi, 1, 0);
	return 0;
}

for (x = 0; x < nb_sizes; x++)
	kfree(sg_out[x]);

if (mcspi->fifo_depth > 0)
	return count;

/*
 * Due to the DMA transfer length reduction the missing bytes must
 * be read manually to receive all of the expected data.
 */
omap2_mcspi_set_enable(spi, 0);

elements = element_count - 1;

if (l & OMAP2_MCSPI_CHCONF_TURBO) {
	/* Turbo mode holds one extra word back; read one more manually */
	elements--;

	if (!mcspi_wait_for_reg_bit(chstat_reg,
				    OMAP2_MCSPI_CHSTAT_RXS)) {
		u32 w;
/*
 * NOTE(review): excerpt from the middle of a PIO transfer routine; only
 * the 8-bit word path is visible — the function header and any 16/32-bit
 * paths are outside the visible region.
 */
count = xfer->len;
c = count;
word_len = cs->word_len;
l = mcspi_cached_chconf0(spi);

/*
 * We store the pre-calculated register addresses on stack to speed
 * up the transfer loop.
 */
tx_reg = base + OMAP2_MCSPI_TX0;
rx_reg = base + OMAP2_MCSPI_RX0;
chstat_reg = base + OMAP2_MCSPI_CHSTAT0;

/* Nothing to do if less than one full word remains */
if (c < (word_len>>3))
	return 0;

if (word_len <= 8) {
	u8 *rx;
	const u8 *tx;

	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	do {
		c -= 1;
		if (tx != NULL) {
			if (mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_TXS) < 0) {
				dev_err(&spi->dev, "TXS timed out\n");
				goto out;
			}
			dev_vdbg(&spi->dev, "write-%d %02x\n",
					word_len, *tx);
			writel_relaxed(*tx++, tx_reg);
		}
		if (rx != NULL) {
			if (mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_RXS) < 0) {
				dev_err(&spi->dev, "RXS timed out\n");
				goto out;
			}

			*rx++ = readl_relaxed(rx_reg);
			dev_vdbg(&spi->dev, "read-%d %08x\n",
					word_len, *(rx - 1));
		}
		/* Add word delay between each word */
		spi_delay_exec(&xfer->word_delay, xfer);
	/*
	 * NOTE(review): this byte loop decrements c by 1 but exits while
	 * c is still 1..3, leaving the last bytes untransferred; a
	 * byte-wide loop is expected to run "while (c)" — this looks like
	 * the 32-bit path's condition pasted in. Confirm against the full
	 * source.
	 */
	} while (c >= 4);
}

/* for TX_ONLY mode, be sure all words have shifted out */
if (xfer->rx_buf == NULL) {
	if (mcspi_wait_for_reg_bit(chstat_reg,
			OMAP2_MCSPI_CHSTAT_TXS) < 0) {
		dev_err(&spi->dev, "TXS timed out\n");
	/* NOTE(review): "elseif" is a paste artifact for "else if" */
	} elseif (mcspi_wait_for_reg_bit(chstat_reg,
			OMAP2_MCSPI_CHSTAT_EOT) < 0)
		dev_err(&spi->dev, "EOT timed out\n");

	/*
	 * disable chan to purge rx datas received in TX_ONLY transfer,
	 * otherwise these rx datas will affect the direct following
	 * RX_ONLY transfer.
	 */
	omap2_mcspi_set_enable(spi, 0);
}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}
/*
 * standard 4-wire host mode: SCK, MOSI/out, MISO/in, nCS
 * REVISIT: this controller could support SPI_3WIRE mode.
 *
 * NOTE(review): excerpt from the middle of a per-transfer CHCONF0
 * setup routine; the function header is outside the visible region.
 */
if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
	l &= ~OMAP2_MCSPI_CHCONF_IS;
	l &= ~OMAP2_MCSPI_CHCONF_DPE1;
	l |= OMAP2_MCSPI_CHCONF_DPE0;
} else {
	l |= OMAP2_MCSPI_CHCONF_IS;
	l |= OMAP2_MCSPI_CHCONF_DPE1;
	l &= ~OMAP2_MCSPI_CHCONF_DPE0;
}

/* wordlength */
l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
l |= (word_len - 1) << 7;

/* set chipselect polarity; manage with FORCE */
if (!(spi->mode & SPI_CS_HIGH))
	l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
else
	l &= ~OMAP2_MCSPI_CHCONF_EPOL;

/* set clock divisor */
l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
l |= clkd << 2;

/* set clock granularity */
l &= ~OMAP2_MCSPI_CHCONF_CLKG;
l |= clkg;
if (clkg) {
	/* Fine-grained clocking uses the EXTCLK field in CHCTRL0 */
	cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
	cs->chctrl0 |= extclk << 8;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
}

/* set SPI mode 0..3 */
if (spi->mode & SPI_CPOL)
	l |= OMAP2_MCSPI_CHCONF_POL;
else
	l &= ~OMAP2_MCSPI_CHCONF_POL;
if (spi->mode & SPI_CPHA)
	l |= OMAP2_MCSPI_CHCONF_PHA;
else
	l &= ~OMAP2_MCSPI_CHCONF_PHA;

/*
 * Write the config twice: first with FORCE set, then without — the
 * intent of the double write is not visible here; confirm against the
 * full source before changing it.
 */
mcspi_write_chconf0(spi, l | OMAP2_MCSPI_CHCONF_FORCE);
mcspi_write_chconf0(spi, l);
/*
 * Note that we currently allow DMA only if we get a channel
 * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
 *
 * NOTE(review): "staticint" below is a paste artifact for "static int".
 * This function is also truncated — the "no_dma" label targeted by the
 * goto lies outside the visible region.
 */
staticint omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
				  struct omap2_mcspi_dma *mcspi_dma)
{
	int ret = 0;

	/* RX channel first; without it we fall back to PIO entirely */
	mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
					     mcspi_dma->dma_rx_ch_name);
	if (IS_ERR(mcspi_dma->dma_rx)) {
		ret = PTR_ERR(mcspi_dma->dma_rx);
		mcspi_dma->dma_rx = NULL;
		goto no_dma;
	}

	mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
					     mcspi_dma->dma_tx_ch_name);
	if (IS_ERR(mcspi_dma->dma_tx)) {
		ret = PTR_ERR(mcspi_dma->dma_tx);
		mcspi_dma->dma_tx = NULL;
		/* All-or-nothing: TX failed, so release RX as well */
		dma_release_channel(mcspi_dma->dma_rx);
		mcspi_dma->dma_rx = NULL;
	}
/*
 * We only enable one channel at a time -- the one whose message is
 * being processed -- although this controller would gladly
 * arbitrate among multiple channels. This corresponds to "single
 * channel" host mode. As a side effect, we need to manage the
 * chipselect with the FORCE bit ... CS != channel enable.
 *
 * NOTE(review): the following is a fused excerpt from at least two
 * functions (a per-transfer setup path and a prepare-message path);
 * several connecting statements are missing from the visible region.
 */
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs;
struct omap2_mcspi_device_config *cd;
int par_override = 0;
int status = 0;
/*
 * NOTE(review): chconf is used with |= below but never initialized in
 * this excerpt — presumably loaded from the cached CHCONF0 in elided
 * code; confirm against the full source.
 */
u32 chconf;

/*
 * The target driver could have changed spi->mode in which case
 * it will be different from cs->mode (the current hardware setup).
 * If so, set par_override (even though it's not a parity issue) so
 * omap2_mcspi_setup_transfer will be called to configure the hardware
 * with the correct mode on the first iteration of the loop below.
 */
if (spi->mode != cs->mode)
	par_override = 1;

omap2_mcspi_set_enable(spi, 0);

if (spi_get_csgpiod(spi, 0))
	omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);

if (par_override ||
    (t->speed_hz != spi->max_speed_hz) ||
    (t->bits_per_word != spi->bits_per_word)) {
	par_override = 1;
	status = omap2_mcspi_setup_transfer(spi, t);
	if (status < 0)
		goto out;
	if (t->speed_hz == spi->max_speed_hz &&
	    t->bits_per_word == spi->bits_per_word)
		par_override = 0;
}

if (cd && cd->turbo_mode && t->tx_buf == NULL) {
	/* Turbo mode is for more than one word */
	if (t->len > ((cs->word_len + 7) >> 3))
		chconf |= OMAP2_MCSPI_CHCONF_TURBO;
}

/*
 * The conditions are strict, it is mandatory to check each transfer of
 * the list to see if multi-mode is applicable.
 */
mcspi->use_multi_mode = true;

if (mcspi->last_msg_kept_cs)
	mcspi->use_multi_mode = false;

/*
 * Check if this transfer contains only one word.
 */
if (bits_per_word < 8 && tr->len == 1) {
	/* multi-mode is applicable, only one word (1..7 bits) */
/* NOTE(review): "elseif" below is a paste artifact for "else if" */
} elseif (bits_per_word >= 8 && tr->len == bits_per_word / 8) {
	/* multi-mode is applicable, only one word (8..32 bits) */
} else {
	/* multi-mode is not applicable: more than one word in the transfer */
	mcspi->use_multi_mode = false;
}

if (list_is_last(&tr->transfer_list, &msg->transfers)) {
	/* Check if transfer asks to keep the CS status after the whole message */
	if (tr->cs_change) {
		mcspi->use_multi_mode = false;
		mcspi->last_msg_kept_cs = true;
	} else {
		mcspi->last_msg_kept_cs = false;
	}
} else {
	/* Check if transfer asks to change the CS status after the transfer */
	if (!tr->cs_change)
		mcspi->use_multi_mode = false;
}

/* NOTE(review): closes a loop over msg->transfers whose head is outside
 * the visible region */
}

omap2_mcspi_set_mode(ctlr);

/*
 * In single mode only a single channel can have the FORCE bit enabled
 * in its chconf0 register.
 * Scan all channels and disable them except the current one.
 * A FORCE can remain from a last transfer having cs_change enabled.
 *
 * In multi mode all FORCE bits must be disabled.
 */
list_for_each_entry(cs, &ctx->cs, node) {
	if (msg->spi->controller_state == cs && !mcspi->use_multi_mode) {
		continue;
	}
/*
 * Runtime PM suspend hook: move the pins to their idle state.
 *
 * A failure to change pin state is logged but deliberately not treated as
 * fatal, so the device can still enter runtime suspend; always returns 0.
 *
 * Fixed paste artifact: "staticint" -> "static int".
 */
static int omap_mcspi_runtime_suspend(struct device *dev)
{
	int error;

	error = pinctrl_pm_select_idle_state(dev);
	if (error)
		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);

	return 0;
}
/*
 * When SPI wakes up from off-mode, CS is in the active state. If it was in
 * the inactive state when the driver was suspended, force it back to the
 * inactive state at wake up.
 *
 * NOTE(review): "staticint" below is a paste artifact for "static int";
 * this function is truncated — the body continues outside the visible
 * region (cs and ctx are declared but not yet used here).
 */
staticint omap_mcspi_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct omap2_mcspi *mcspi = spi_controller_get_devdata(ctlr);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_cs *cs;
	int error;

	error = pinctrl_pm_select_default_state(dev);
	if (error)
		dev_warn(dev, "%s: failed to set pins: %i\n", __func__, error);
/*
 * NOTE(review): excerpt from the middle of the probe function; the
 * function header, the "free_ctlr" error label, and the registration
 * tail are outside the visible region.
 */
mcspi->dma_channels = devm_kcalloc(&pdev->dev, ctlr->num_chipselect,
				   sizeof(struct omap2_mcspi_dma),
				   GFP_KERNEL);
if (mcspi->dma_channels == NULL) {
	status = -ENOMEM;
	goto free_ctlr;
}

/* Request one RX/TX DMA channel pair per chip select */
for (i = 0; i < ctlr->num_chipselect; i++) {
	sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
	sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);

	/* Only -EPROBE_DEFER aborts probe; other DMA failures fall back to PIO */
	status = omap2_mcspi_request_dma(mcspi,
					 &mcspi->dma_channels[i]);
	if (status == -EPROBE_DEFER)
		goto free_ctlr;
}

/* platform_get_irq() returns the IRQ number on success (>= 0) */
status = platform_get_irq(pdev, 0);
if (status < 0)
	goto free_ctlr;
init_completion(&mcspi->txdone);
status = devm_request_irq(&pdev->dev, status,
			  omap2_mcspi_irq_handler, 0, pdev->name,
			  mcspi);
if (status) {
	dev_err(&pdev->dev, "Cannot request IRQ");
	goto free_ctlr;
}

/* Optional functional clock: fall back to the fixed maximum if absent */
mcspi->ref_clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(mcspi->ref_clk)) {
	status = PTR_ERR(mcspi->ref_clk);
	dev_err_probe(&pdev->dev, status, "Failed to get ref_clk");
	goto free_ctlr;
}
if (mcspi->ref_clk)
	mcspi->ref_clk_hz = clk_get_rate(mcspi->ref_clk);
else
	mcspi->ref_clk_hz = OMAP2_MCSPI_MAX_FREQ;
ctlr->max_speed_hz = mcspi->ref_clk_hz;
/* Minimum rate is the reference clock divided by the largest divider (2^15) */
ctlr->min_speed_hz = mcspi->ref_clk_hz >> 15;
/*
 * NOTE(review): the following German website boilerplate was evidently
 * pasted into this source file by accident; as bare text it breaks
 * compilation. Commented out here pending removal — it is not part of
 * the driver. (Translation: "The information on this website was
 * carefully compiled to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the information provided
 * is guaranteed. Note: the coloured syntax display and the measurement
 * are still experimental.")
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */