/* * DW SPI controller demands any native CS being set in order to * proceed with data transfer. So in order to activate the SPI * communications we must set a corresponding bit in the Slave * Enable register no matter whether the SPI core is configured to * support active-high or active-low CS level.
*/ if (cs_high == enable)
dw_writel(dws, DW_SPI_SER, BIT(spi_get_chipselect(spi, 0))); else
dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_set_cs, "SPI_DW_CORE");
/* Return the max entries we can fill into tx fifo */ staticinline u32 dw_spi_tx_max(struct dw_spi *dws)
{
u32 tx_room, rxtx_gap;
/* * Another concern is about the tx/rx mismatch, we * though to use (dws->fifo_len - rxflr - txflr) as * one maximum value for tx, but it doesn't cover the * data which is out of tx/rx fifo and inside the * shift registers. So a control from sw point of * view is taken.
*/
rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);
/* Return the max entries we should read out of rx fifo */ staticinline u32 dw_spi_rx_max(struct dw_spi *dws)
{ return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}
if (dw_spi_check_status(dws, false)) {
spi_finalize_current_transfer(dws->host); return IRQ_HANDLED;
}
/* * Read data from the Rx FIFO every time we've got a chance executing * this method. If there is nothing left to receive, terminate the * procedure. Otherwise adjust the Rx FIFO Threshold level if it's a * final stage of the transfer. By doing so we'll get the next IRQ * right when the leftover incoming data is received.
*/
dw_reader(dws); if (!dws->rx_len) {
dw_spi_mask_intr(dws, 0xff);
spi_finalize_current_transfer(dws->host);
} elseif (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
}
/* * Send data out if Tx FIFO Empty IRQ is received. The IRQ will be * disabled after the data transmission is finished so not to * have the TXE IRQ flood at the final stage of the transfer.
*/ if (irq_status & DW_SPI_INT_TXEI) {
dw_writer(dws); if (!dws->tx_len)
dw_spi_mask_intr(dws, DW_SPI_INT_TXEI);
}
/* * Originally Tx and Rx data lengths match. Rx FIFO Threshold level * will be adjusted at the final stage of the IRQ-based SPI transfer * execution so not to lose the leftover of the incoming data.
*/
level = min_t(unsignedint, dws->fifo_len / 2, dws->tx_len);
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
/* * The iterative procedure of the poll-based transfer is simple: write as much * as possible to the Tx FIFO, wait until the pending to receive data is ready * to be read, read it from the Rx FIFO and check whether the performed * procedure has been successful. * * Note this method the same way as the IRQ-based transfer won't work well for * the SPI devices connected to the controller with native CS due to the * automatic CS assertion/de-assertion.
*/ staticint dw_spi_poll_transfer(struct dw_spi *dws, struct spi_transfer *transfer)
{ struct spi_delay delay;
u16 nbits; int ret;
/* * Calculate the total length of the EEPROM command transfer and * either use the pre-allocated buffer or create a temporary one.
*/
len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; if (op->data.dir == SPI_MEM_DATA_OUT)
len += op->data.nbytes;
if (len <= DW_SPI_BUF_SIZE) {
out = dws->buf;
} else {
out = kzalloc(len, GFP_KERNEL); if (!out) return -ENOMEM;
}
/* * Collect the operation code, address and dummy bytes into the single * buffer. If it's a transfer with data to be sent, also copy it into the * single buffer in order to speed the data transmission up.
*/ for (i = 0; i < op->cmd.nbytes; ++i)
out[i] = DW_SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1); for (j = 0; j < op->addr.nbytes; ++i, ++j)
out[i] = DW_SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1); for (j = 0; j < op->dummy.nbytes; ++i, ++j)
out[i] = 0x0;
if (op->data.dir == SPI_MEM_DATA_OUT)
memcpy(&out[i], op->data.buf.out, op->data.nbytes);
/* * At initial stage we just pre-fill the Tx FIFO in with no rush, * since native CS hasn't been enabled yet and the automatic data * transmission won't start til we do that.
*/
len = min(dws->fifo_len, dws->tx_len);
buf = dws->tx; while (len--)
dw_write_io_reg(dws, DW_SPI_DR, *buf++);
/* * After setting any bit in the SER register the transmission will * start automatically. We have to keep up with that procedure * otherwise the CS de-assertion will happen whereupon the memory * operation will be pre-terminated.
*/
len = dws->tx_len - ((void *)buf - dws->tx);
dw_spi_set_cs(spi, false); while (len) {
entries = readl_relaxed(dws->regs + DW_SPI_TXFLR); if (!entries) {
dev_err(&dws->host->dev, "CS de-assertion on Tx\n"); return -EIO;
}
room = min(dws->fifo_len - entries, len); for (; room; --room, --len)
dw_write_io_reg(dws, DW_SPI_DR, *buf++);
}
/* * Data fetching will start automatically if the EEPROM-read mode is * activated. We have to keep up with the incoming data pace to * prevent the Rx FIFO overflow causing the inbound data loss.
*/
len = dws->rx_len;
buf = dws->rx; while (len) {
entries = readl_relaxed(dws->regs + DW_SPI_RXFLR); if (!entries) {
sts = readl_relaxed(dws->regs + DW_SPI_RISR); if (sts & DW_SPI_INT_RXOI) {
dev_err(&dws->host->dev, "FIFO overflow on Rx\n"); return -EIO;
} continue;
}
entries = min(entries, len); for (; entries; --entries, --len)
*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
}
/* * The SPI memory operation implementation below is the best choice for the * devices, which are selected by the native chip-select lane. It's * specifically developed to workaround the problem with automatic chip-select * lane toggle when there is no data in the Tx FIFO buffer. Luckily the current * SPI-mem core calls exec_op() callback only if the GPIO-based CS is * unavailable.
*/ staticint dw_spi_exec_mem_op(struct spi_mem *mem, conststruct spi_mem_op *op)
{ struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller); struct dw_spi_cfg cfg; unsignedlong flags; int ret;
/* * Collect the outbound data into a single buffer to speed the * transmission up at least on the initial stage.
*/
ret = dw_spi_init_mem_buf(dws, op); if (ret) return ret;
/* * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN * operation. Transmit-only mode is suitable for the rest of them.
*/
cfg.dfs = 8;
cfg.freq = clamp(op->max_freq, 0U, dws->max_mem_freq); if (op->data.dir == SPI_MEM_DATA_IN) {
cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
cfg.ndf = op->data.nbytes;
} else {
cfg.tmode = DW_SPI_CTRLR0_TMOD_TO;
}
dw_spi_enable_chip(dws, 0);
dw_spi_update_config(dws, mem->spi, &cfg);
dw_spi_mask_intr(dws, 0xff);
dw_spi_enable_chip(dws, 1);
/* * DW APB SSI controller has very nasty peculiarities. First originally * (without any vendor-specific modifications) it doesn't provide a * direct way to set and clear the native chip-select signal. Instead * the controller asserts the CS lane if Tx FIFO isn't empty and a * transmission is going on, and automatically de-asserts it back to * the high level if the Tx FIFO doesn't have anything to be pushed * out. Due to that a multi-tasking or heavy IRQs activity might be * fatal, since the transfer procedure preemption may cause the Tx FIFO * getting empty and sudden CS de-assertion, which in the middle of the * transfer will most likely cause the data loss. Secondly the * EEPROM-read or Read-only DW SPI transfer modes imply the incoming * data being automatically pulled in into the Rx FIFO. So if the * driver software is late in fetching the data from the FIFO before * it's overflown, new incoming data will be lost. In order to make * sure the executed memory operations are CS-atomic and to prevent the * Rx FIFO overflow we have to disable the local interrupts so to block * any preemption during the subsequent IO operations. * * Note. At some circumstances disabling IRQs may not help to prevent * the problems described above. The CS de-assertion and Rx FIFO * overflow may still happen due to the relatively slow system bus or * CPU not working fast enough, so the write-then-read algo implemented * here just won't keep up with the SPI bus data transfer. Such * situation is highly platform specific and is supposed to be fixed by * manually restricting the SPI bus frequency using the * dws->max_mem_freq parameter.
*/
local_irq_save(flags);
preempt_disable();
ret = dw_spi_write_then_read(dws, mem->spi);
local_irq_restore(flags);
preempt_enable();
/* * Wait for the operation being finished and check the controller * status only if there hasn't been any run-time error detected. In the * former case it's just pointless. In the later one to prevent an * additional error message printing since any hw error flag being set * would be due to an error detected on the data transfer.
*/ if (!ret) {
ret = dw_spi_wait_mem_op_done(dws); if (!ret)
ret = dw_spi_check_status(dws, true);
}
dw_spi_stop_mem_op(dws, mem->spi);
dw_spi_free_mem_buf(dws);
return ret;
}
/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since DW SPI
 * controller doesn't have an embedded dirmap interface. Note the memory
 * operations implemented in this driver is the best choice only for the DW APB
 * SSI controller with standard native CS functionality. If a hardware vendor
 * has fixed the automatic CS assertion/de-assertion peculiarity, then it will
 * be safer to use the normal SPI-messages-based transfers implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
	/*
	 * Only install the defaults when no glue-layer exec_op() is set, the
	 * controller has no CS-override capability and no custom set_cs()
	 * callback is provided.
	 */
	if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
	    !dws->set_cs) {
		dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
		dws->mem_ops.supports_op = dw_spi_supports_mem_op;
		dws->mem_ops.exec_op = dw_spi_exec_mem_op;
		/* Fall back to the regular bus max frequency if none given */
		if (!dws->max_mem_freq)
			dws->max_mem_freq = dws->max_freq;
	}
}
/* This may be called twice for each spi dev */ staticint dw_spi_setup(struct spi_device *spi)
{ struct dw_spi *dws = spi_controller_get_devdata(spi->controller); struct dw_spi_chip_data *chip;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi); if (!chip) { struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
u32 rx_sample_dly_ns;
chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM;
spi_set_ctldata(spi, chip); /* Get specific / default rx-sample-delay */ if (device_property_read_u32(&spi->dev, "rx-sample-delay-ns",
&rx_sample_dly_ns) != 0) /* Use default controller value */
rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
NSEC_PER_SEC /
dws->max_freq);
}
/* * Update CR0 data each time the setup callback is invoked since * the device parameters could have been changed, for instance, by * the MMC SPI driver or something else.
*/
chip->cr0 = dw_spi_prepare_cr0(dws, spi);
/* Restart the controller, disable all interrupts, clean rx fifo */ staticvoid dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
{
dw_spi_reset_chip(dws);
/* * Retrieve the Synopsys component version if it hasn't been specified * by the platform. CoreKit version ID is encoded as a 3-chars ASCII * code enclosed with '*' (typical for the most of Synopsys IP-cores).
*/ if (!dws->ver) {
dws->ver = dw_readl(dws, DW_SPI_VERSION);
/* * Try to detect the number of native chip-selects if the platform * driver didn't set it up. There can be up to 16 lines configured.
*/ if (!dws->num_cs) {
u32 ser;
dw_writel(dws, DW_SPI_SER, 0xffff);
ser = dw_readl(dws, DW_SPI_SER);
dw_writel(dws, DW_SPI_SER, 0);
dws->num_cs = hweight16(ser);
}
/* * Try to detect the FIFO depth if not set by interface driver, * the depth could be from 2 to 256 from HW spec
*/ if (!dws->fifo_len) {
u32 fifo;
/* * Detect CTRLR0.DFS field size and offset by testing the lowest bits * writability. Note DWC SSI controller also has the extended DFS, but * with zero offset.
*/ if (dw_spi_ip_is(dws, PSSI)) {
u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
host); if (ret < 0 && ret != -ENOTCONN) {
dev_err(dev, "can not get IRQ\n"); goto err_free_host;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.