/* default sizes - can be changed when SPI Engine firmware is compiled */
#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16
#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16
/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** @cmd_length: Number of elements in cmd_buf array. */
	unsigned cmd_length;
	/** @cmd_buf: Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** @tx_length: Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** @tx_buf: Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** @rx_xfer: Next xfer with rx_buf not yet fully written to RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** @rx_length: Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** @rx_buf: Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
};
if (xfer->bits_per_word <= 8)
len = xfer->len; elseif (xfer->bits_per_word <= 16)
len = xfer->len / 2; else
len = xfer->len / 4;
while (len) { unsignedint n = min(len, 256U); unsignedint flags = 0;
if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
flags |= SPI_ENGINE_TRANSFER_WRITE; if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
flags |= SPI_ENGINE_TRANSFER_READ;
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
len -= n;
}
}
/*
 * Append SLEEP instructions to the program to realize a delay of delay_ns,
 * compensating for the execution time (inst_ns) of the instruction itself.
 */
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
				 int delay_ns, int inst_ns, u32 sclk_hz)
{
	unsigned int t;

	/*
	 * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
	 * delay is less than the instruction execution time, there is no need
	 * for an extra sleep instruction since the instruction execution time
	 * will already cover the required delay.
	 */
	if (delay_ns < 0 || delay_ns <= inst_ns)
		return;

	/* Convert the remaining delay to SCLK cycles, rounding up. */
	t = DIV_ROUND_UP_ULL((u64)(delay_ns - inst_ns) * sclk_hz, NSEC_PER_SEC);
	while (t) {
		/* Each SLEEP instruction encodes at most 256 cycles. */
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}
/* * Performs precompile steps on the message. * * The SPI core does most of the message/transfer validation and filling in * fields for us via __spi_validate(). This fixes up anything remaining not * done there. * * NB: This is separate from spi_engine_compile_message() because the latter * is called twice and would otherwise result in double-evaluation. * * Returns 0 on success, -EINVAL on failure.
*/ staticint spi_engine_precompile_message(struct spi_message *msg)
{ unsignedint clk_div, max_hz = msg->spi->controller->max_speed_hz; struct spi_transfer *xfer;
u8 min_bits_per_word = U8_MAX;
u8 max_bits_per_word = 0;
list_for_each_entry(xfer, &msg->transfers, transfer_list) { /* If we have an offload transfer, we can't rx to buffer */ if (msg->offload && xfer->rx_buf) return -EINVAL;
/* * If all xfers in the message use the same bits_per_word, we can * provide some optimization when using SPI offload.
*/ if (msg->offload) { struct spi_engine_offload *priv = msg->offload->priv;
/* * Take into account instruction execution time for more accurate sleep * times, especially when the delay is small.
*/
inst_ns = DIV_ROUND_UP(NSEC_PER_SEC, host->max_speed_hz);
clk_div = 1;
/*
 * As an optimization, SPI offload sets this once when the offload is
 * enabled instead of repeating the instruction in each message.
*/ if (msg->offload) {
priv = msg->offload->priv;
priv->spi_mode_config = spi_engine_get_config(spi);
/* * If all xfers use the same bits_per_word, it can be optimized * in the same way.
*/
bits_per_word = priv->bits_per_word;
} else {
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
spi_engine_get_config(spi)));
}
if (!keep_cs)
spi_engine_gen_cs(p, dry, spi, false);
/* * Restore clockdiv to default so that future gen_sleep commands don't * have to be aware of the current register state.
*/ if (clk_div != 1)
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
}
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM); while (n && st->cmd_length) {
m = min(n, st->cmd_length);
buf = st->cmd_buf; for (i = 0; i < m; i++)
writel_relaxed(buf[i], addr);
st->cmd_buf += m;
st->cmd_length -= m;
n -= m;
}
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM); while (n && st->tx_length) { if (st->tx_xfer->bits_per_word <= 8) { const u8 *buf = st->tx_buf;
m = min(n, st->tx_length); for (i = 0; i < m; i++)
writel_relaxed(buf[i], addr);
st->tx_buf += m;
st->tx_length -= m;
} elseif (st->tx_xfer->bits_per_word <= 16) { const u16 *buf = (const u16 *)st->tx_buf;
m = min(n, st->tx_length / 2); for (i = 0; i < m; i++)
writel_relaxed(buf[i], addr);
st->tx_buf += m * 2;
st->tx_length -= m * 2;
} else { const u32 *buf = (const u32 *)st->tx_buf;
m = min(n, st->tx_length / 4); for (i = 0; i < m; i++)
writel_relaxed(buf[i], addr);
st->tx_buf += m * 4;
st->tx_length -= m * 4;
}
n -= m; if (st->tx_length == 0)
spi_engine_tx_next(msg);
}
n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL); while (n && st->rx_length) { if (st->rx_xfer->bits_per_word <= 8) {
u8 *buf = st->rx_buf;
m = min(n, st->rx_length); for (i = 0; i < m; i++)
buf[i] = readl_relaxed(addr);
st->rx_buf += m;
st->rx_length -= m;
} elseif (st->rx_xfer->bits_per_word <= 16) {
u16 *buf = (u16 *)st->rx_buf;
m = min(n, st->rx_length / 2); for (i = 0; i < m; i++)
buf[i] = readl_relaxed(addr);
st->rx_buf += m * 2;
st->rx_length -= m * 2;
} else {
u32 *buf = (u32 *)st->rx_buf;
m = min(n, st->rx_length / 4); for (i = 0; i < m; i++)
buf[i] = readl_relaxed(addr);
st->rx_buf += m * 4;
st->rx_length -= m * 4;
}
n -= m; if (st->rx_length == 0)
spi_engine_rx_next(msg);
}
if (p->length > spi_engine->offload_ctrl_mem_size) return -EINVAL;
/* count total number of tx words in message */
list_for_each_entry(xfer, &msg->transfers, transfer_list) { /* no support for reading to rx_buf */ if (xfer->rx_buf) return -EINVAL;
if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA)) return -EINVAL;
if (tx_word_count > spi_engine->offload_sdo_mem_size) return -EINVAL;
/* * This protects against calling spi_optimize_message() with an offload * that has already been prepared with a different message.
*/ if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags)) return -EBUSY;
p = kzalloc(struct_size(p, instructions, p_dry.length + 1), GFP_KERNEL); if (!p) return -ENOMEM;
spi_engine_compile_message(msg, false, p);
/* * Non-offload needs SYNC for completion interrupt. Older versions of * the IP core also need SYNC for offload to work properly.
*/ if (!msg->offload || spi_engine->offload_requires_sync)
spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
msg->opt_state = p;
if (msg->offload) {
ret = spi_engine_offload_prepare(msg); if (ret) {
msg->opt_state = NULL;
kfree(p); return ret;
}
}
return 0;
}
staticint spi_engine_unoptimize_message(struct spi_message *msg)
{ if (msg->offload)
spi_engine_offload_unprepare(msg->offload);
/* * In addition to setting the flags, we have to do a CS assert command * to make the new setting actually take effect.
*/
writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
if (msg->offload) {
dev_err(&host->dev, "Single transfer offload not supported\n");
msg->status = -EOPNOTSUPP; goto out;
}
/* reinitialize message state for this transfer */
memset(st, 0, sizeof(*st));
st->cmd_buf = p->instructions;
st->cmd_length = p->length;
msg->state = st;
reinit_completion(&spi_engine->msg_complete);
if (trace_spi_transfer_start_enabled()) { struct spi_transfer *xfer;
if (!wait_for_completion_timeout(&spi_engine->msg_complete,
msecs_to_jiffies(5000))) {
dev_err(&host->dev, "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
msg->status = -ETIMEDOUT;
}
if (trace_spi_transfer_stop_enabled()) { struct spi_transfer *xfer;
/* * REVISIT: for now, all SPI Engines only have one offload. In the * future, this should be read from a memory mapped register to * determine the number of offloads enabled at HDL compile time. For * now, we can tell if an offload is present if there is a trigger * source wired up to it.
*/ if (device_property_present(&pdev->dev, "trigger-sources")) { struct spi_engine_offload *priv;
spi_engine->offload =
devm_spi_offload_alloc(&pdev->dev, sizeof(struct spi_engine_offload)); if (IS_ERR(spi_engine->offload)) return PTR_ERR(spi_engine->offload);
if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
} else { /* * HDL compile option to enable TX DMA stream also disables * the SDO memory, so can't do both at the same time.
*/
spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
}
}
spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk"); if (IS_ERR(spi_engine->clk)) return PTR_ERR(spi_engine->clk);
spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk"); if (IS_ERR(spi_engine->ref_clk)) return PTR_ERR(spi_engine->ref_clk);
spi_engine->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(spi_engine->base)) return PTR_ERR(spi_engine->base);
version = readl(spi_engine->base + ADI_AXI_REG_VERSION); if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
ADI_AXI_PCORE_VER_MAJOR(version),
ADI_AXI_PCORE_VER_MINOR(version),
ADI_AXI_PCORE_VER_PATCH(version)); return -ENODEV;
}
/* Some features depend of the IP core version. */ if (ADI_AXI_PCORE_VER_MAJOR(version) >= 1) { if (ADI_AXI_PCORE_VER_MINOR(version) >= 2) {
host->mode_bits |= SPI_CS_HIGH;
host->setup = spi_engine_setup;
} if (ADI_AXI_PCORE_VER_MINOR(version) >= 3)
host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
}
if (host->max_speed_hz == 0) return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.