/* configuration load timeout in microseconds */ #define I2C_CONFIG_LOAD_TIMEOUT 1000000
/* packet header size in bytes */ #define I2C_PACKET_HEADER_SIZE 12
/* * I2C Controller will use PIO mode for transfers up to 32 bytes in order to * avoid DMA overhead, otherwise external APB DMA controller will be used. * Note that the actual MAX PIO length is 20 bytes because 32 bytes include * I2C_PACKET_HEADER_SIZE.
*/ #define I2C_PIO_MODE_PREFERRED_LEN 32
/*
 * msg_end_type: The bus control which needs to be sent at end of transfer.
 * @MSG_END_STOP: Send stop pulse.
 * @MSG_END_REPEAT_START: Send repeat-start.
 * @MSG_END_CONTINUE: Don't send stop or repeat-start.
 */
enum msg_end_type {
	MSG_END_STOP,
	MSG_END_REPEAT_START,
	MSG_END_CONTINUE,
};
/**
 * struct tegra_i2c_hw_feature - per hardware generation features
 * @has_continue_xfer_support: continue-transfer supported
 * @has_per_pkt_xfer_complete_irq: has enable/disable capability for transfer
 *		completion interrupt on per packet basis
 * @has_config_load_reg: has the config load register to load the new
 *		configuration
 * @clk_divisor_hs_mode: clock divisor in HS mode
 * @clk_divisor_std_mode: clock divisor in standard mode, applicable if there
 *		is no fast clock source (i.e. single clock source)
 * @clk_divisor_fast_mode: clock divisor in fast mode, applicable if there is
 *		no fast clock source (i.e. single clock source)
 * @clk_divisor_fast_plus_mode: clock divisor in fast-plus mode, applicable if
 *		there is no fast clock source (i.e. single clock source)
 * @has_multi_master_mode: the I2C controller supports running in
 *		single-master or multi-master mode
 * @has_slcg_override_reg: the I2C controller supports a register that
 *		overrides the second level clock gating
 * @has_mst_fifo: the I2C controller contains the new MST FIFO interface that
 *		provides additional features and allows for longer messages to
 *		be transferred in one go
 * @has_mst_reset: the I2C controller contains MASTER_RESET_CTRL register
 *		which provides an alternative to controller reset when
 *		configured as I2C master
 * @quirks: I2C adapter quirks for limiting write/read transfer size and not
 *		allowing 0 length transfers
 * @supports_bus_clear: Bus Clear support to recover from bus hang during
 *		SDA stuck low from device for some unknown reasons
 * @has_apb_dma: support of APBDMA on corresponding Tegra chip
 * @tlow_std_mode: low period of the clock in standard mode
 * @thigh_std_mode: high period of the clock in standard mode
 * @tlow_fast_fastplus_mode: low period of the clock in fast/fast-plus modes
 * @thigh_fast_fastplus_mode: high period of the clock in fast/fast-plus modes
 * @setup_hold_time_std_mode: setup and hold time for start and stop
 *		conditions in standard mode
 * @setup_hold_time_fast_fast_plus_mode: setup and hold time for start and
 *		stop conditions in fast/fast-plus modes
 * @setup_hold_time_hs_mode: setup and hold time for start and stop conditions
 *		in HS mode
 * @has_interface_timing_reg: has interface timing register to program the
 *		tuned timing settings
 */
struct tegra_i2c_hw_feature {
	bool has_continue_xfer_support;
	bool has_per_pkt_xfer_complete_irq;
	bool has_config_load_reg;
	u32 clk_divisor_hs_mode;
	u32 clk_divisor_std_mode;
	u32 clk_divisor_fast_mode;
	u32 clk_divisor_fast_plus_mode;
	bool has_multi_master_mode;
	bool has_slcg_override_reg;
	bool has_mst_fifo;
	bool has_mst_reset;
	/* was "conststruct" in the garbled extraction -- missing space */
	const struct i2c_adapter_quirks *quirks;
	bool supports_bus_clear;
	bool has_apb_dma;
	u32 tlow_std_mode;
	u32 thigh_std_mode;
	u32 tlow_fast_fastplus_mode;
	u32 thigh_fast_fastplus_mode;
	u32 setup_hold_time_std_mode;
	u32 setup_hold_time_fast_fast_plus_mode;
	u32 setup_hold_time_hs_mode;
	bool has_interface_timing_reg;
};
/*
 * NOTE(review): garbled extraction below -- the member list of
 * struct tegra_i2c_dev is truncated (cut after @adapter, no closing brace),
 * and the visible tail of tegra_i2c_reg_addr() runs into the TX-FIFO flush
 * loop of an unrelated write helper. Kept byte-identical; restore from the
 * original source before compiling.
 */
/** * struct tegra_i2c_dev - per device I2C context * @dev: device reference for power management * @hw: Tegra I2C HW feature * @adapter: core I2C layer adapter information * @div_clk: clock reference for div clock of I2C controller * @clocks: array of I2C controller clocks * @nclocks: number of clocks in the array * @rst: reset control for the I2C controller * @base: ioremapped registers cookie * @base_phys: physical base address of the I2C controller * @cont_id: I2C controller ID, used for packet header * @irq: IRQ number of transfer complete interrupt * @is_dvc: identifies the DVC I2C controller, has a different register layout * @is_vi: identifies the VI I2C controller, has a different register layout * @msg_complete: transfer completion notifier * @msg_buf_remaining: size of unsent data in the message buffer * @msg_len: length of message in current transfer * @msg_err: error code for completed message * @msg_buf: pointer to current message data * @msg_read: indicates that the transfer is a read access * @timings: i2c timings information like bus frequency * @multimaster_mode: indicates that I2C controller is in multi-master mode * @dma_chan: DMA channel * @dma_phys: handle to DMA resources * @dma_buf: pointer to allocated DMA buffer * @dma_buf_size: DMA buffer size * @dma_dev: DMA device used for transfers * @dma_mode: indicates active DMA transfer * @dma_complete: DMA completion notifier * @atomic_mode: indicates active atomic transfer
*/ struct tegra_i2c_dev { struct device *dev; struct i2c_adapter adapter;
/* * If necessary, i2c_writel() and i2c_readl() will offset the register * in order to talk to the I2C block inside the DVC block.
*/ static u32 tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, unsignedint reg)
/* DVC block: TX FIFO registers sit at a different offset than the rest */
{ if (IS_DVC(i2c_dev))
reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40; elseif (IS_VI(i2c_dev))
/* VI block: registers are word-addressed at base offset 0xc00 */
reg = 0xc00 + (reg << 2);
/* * VI I2C controller has known hardware bug where writes get stuck * when immediate multiple writes happen to TX_FIFO register. * Recommended software work around is to read I2C register after * each write to TX_FIFO register to flush out the data.
*/ while (len--)
i2c_writel(i2c_dev, *data32++, reg);
}
/*
 * NOTE(review): fragment of tegra_i2c_init_dma() -- the function header and
 * parts of the body are missing from this extraction ("elseif" is a garbled
 * "else if"). Kept byte-identical.
 */
/* bail out quietly (PIO will be used) when the matching DMA driver is off */
if (i2c_dev->hw->has_apb_dma) { if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n"); return 0;
}
} elseif (!IS_ENABLED(CONFIG_TEGRA186_GPC_DMA)) {
dev_dbg(i2c_dev->dev, "GPC DMA support not enabled\n"); return 0;
}
/* * The same channel will be used for both RX and TX. * Keeping the name as "tx" for backward compatibility * with existing devicetrees.
*/
i2c_dev->dma_chan = dma_request_chan(i2c_dev->dev, "tx"); if (IS_ERR(i2c_dev->dma_chan)) {
err = PTR_ERR(i2c_dev->dma_chan);
i2c_dev->dma_chan = NULL; goto err_out;
}
/* error path: release DMA resources; only -EPROBE_DEFER is propagated */
err_out:
tegra_i2c_release_dma(i2c_dev); if (err != -EPROBE_DEFER) {
dev_err(i2c_dev->dev, "cannot use DMA: %d\n", err);
dev_err(i2c_dev->dev, "falling back to PIO\n"); return 0;
}
return err;
}
/*
 * One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller)
 * block. This block is identical to the rest of the I2C blocks, except that
 * it only supports master mode, it has registers moved around, and it needs
 * some extra init to get it into I2C mode. The register moves are handled
 * by i2c_readl() and i2c_writel().
 */
static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
{
	u32 val;

	/* enable software programming and route the I2C-done interrupt */
	val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
	val |= DVC_CTRL_REG3_SW_PROG;
	val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
	dvc_writel(i2c_dev, val, DVC_CTRL_REG3);

	/* enable the DVC block's interrupt output */
	val = dvc_readl(i2c_dev, DVC_CTRL_REG1);
	val |= DVC_CTRL_REG1_INTR_EN;
	dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
}
/*
 * NOTE(review): tail of a config-load wait helper -- the function header is
 * missing from this extraction. Kept byte-identical.
 */
/* poll I2C_CONFIG_LOAD until all load bits self-clear, 1 us period */
err = tegra_i2c_poll_register(i2c_dev, I2C_CONFIG_LOAD, 0xffffffff,
1000, I2C_CONFIG_LOAD_TIMEOUT); if (err) {
dev_err(i2c_dev->dev, "failed to load config\n"); return err;
}
return 0;
}
/*
 * NOTE(review): tegra_i2c_master_reset() is truncated in this extraction --
 * the de-assert write (clearing I2C_MASTER_RESET_CNTRL), second fsleep()
 * and return are missing, and the text then runs into a fragment of the
 * controller init path ("staticint" is a garbled "static int").
 * Kept byte-identical.
 */
staticint tegra_i2c_master_reset(struct tegra_i2c_dev *i2c_dev)
{ if (!i2c_dev->hw->has_mst_reset) return -EOPNOTSUPP;
/* * Writing 1 to I2C_MASTER_RESET_CNTRL will reset all internal state of * Master logic including FIFOs. Clear this bit to 0 for normal operation. * SW needs to wait for 2us after assertion and de-assertion of this soft * reset.
*/
i2c_writel(i2c_dev, 0x1, I2C_MASTER_RESET_CNTRL);
fsleep(2);
/* * Reset the controller before initializing it. * In case if device_reset() returns -ENOENT, i.e. when the reset is * not available, the internal software reset will be used if it is * supported by the controller.
*/
err = device_reset(i2c_dev->dev); if (err == -ENOENT)
err = tegra_i2c_master_reset(i2c_dev);
/* * The reset shouldn't ever fail in practice. The failure will be a * sign of a severe problem that needs to be resolved. Still we don't * want to fail the initialization completely because this may break * kernel boot up since voltage regulators use I2C. Hence, we will * emit a noisy warning on error, which won't stay unnoticed and * won't hose machine entirely.
*/
WARN_ON_ONCE(err);
if (IS_DVC(i2c_dev))
tegra_dvc_init(i2c_dev);
/* base controller configuration: packet mode, new master FSM, debounce */
val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN |
FIELD_PREP(I2C_CNFG_DEBOUNCE_CNT, 2);
if (i2c_dev->hw->has_multi_master_mode)
val |= I2C_CNFG_MULTI_MASTER_MODE;
/* program tuned thigh/tlow when the generation has the timing register */
if (i2c_dev->hw->has_interface_timing_reg) {
val = FIELD_PREP(I2C_INTERFACE_TIMING_THIGH, thigh) |
FIELD_PREP(I2C_INTERFACE_TIMING_TLOW, tlow);
i2c_writel(i2c_dev, val, I2C_INTERFACE_TIMING_0);
}
/* * Configure setup and hold times only when tsu_thd is non-zero. * Otherwise, preserve the chip default values.
*/ if (i2c_dev->hw->has_interface_timing_reg && tsu_thd)
i2c_writel(i2c_dev, tsu_thd, I2C_INTERFACE_TIMING_1);
/*
 * NOTE(review): garbled extraction -- a NACK/STOP delay fragment from the
 * error-recovery path is fused onto the front of tegra_i2c_empty_rx_fifo();
 * the function header and the word-copy loop between the availability check
 * and the partial-word handling are missing. Kept byte-identical.
 */
/* * NACK interrupt is generated before the I2C controller generates * the STOP condition on the bus. So, wait for 2 clock periods * before disabling the controller so that the STOP condition has * been delivered properly.
*/
udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->timings.bus_freq_hz));
/* * Catch overflow due to message fully sent before the check for * RX FIFO availability.
*/ if (WARN_ON_ONCE(!(i2c_dev->msg_buf_remaining))) return -EINVAL;
/* read RX FIFO occupancy from the generation-specific status register */
if (i2c_dev->hw->has_mst_fifo) {
val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
rx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_RX, val);
} else {
val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
rx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_RX, val);
}
/* round down to exclude partial word at the end of buffer */
words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; if (words_to_transfer > rx_fifo_avail)
words_to_transfer = rx_fifo_avail;
/* * If there is a partial word at the end of buffer, handle it * manually to prevent overwriting past the end of buffer.
*/ if (rx_fifo_avail > 0 && buf_remaining > 0) { /* * buf_remaining > 3 check not needed as rx_fifo_avail == 0 * when (words_to_transfer was > rx_fifo_avail) earlier * in this function.
*/
val = i2c_readl(i2c_dev, I2C_RX_FIFO);
val = cpu_to_le32(val);
/* copy only the remaining 1-3 bytes out of the 32-bit FIFO word */
memcpy(buf, &val, buf_remaining);
buf_remaining = 0;
rx_fifo_avail--;
}
/* RX FIFO must be drained, otherwise it's an Overflow case. */ if (WARN_ON_ONCE(rx_fifo_avail)) return -EINVAL;
/*
 * NOTE(review): fragment of tegra_i2c_fill_tx_fifo() -- the function header,
 * the word-write loop and the tail (writing the final partial word and the
 * return) are missing from this extraction. Kept byte-identical.
 */
/* read TX FIFO free space from the generation-specific status register */
if (i2c_dev->hw->has_mst_fifo) {
val = i2c_readl(i2c_dev, I2C_MST_FIFO_STATUS);
tx_fifo_avail = FIELD_GET(I2C_MST_FIFO_STATUS_TX, val);
} else {
val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
tx_fifo_avail = FIELD_GET(I2C_FIFO_STATUS_TX, val);
}
/* round down to exclude partial word at the end of buffer */
words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
/* * This hunk pushes 4 bytes at a time into the TX FIFO. * * It's very common to have < 4 bytes, hence there is no word * to push if we have less than 4 bytes to transfer.
*/ if (words_to_transfer) { if (words_to_transfer > tx_fifo_avail)
words_to_transfer = tx_fifo_avail;
/* * Update state before writing to FIFO. Note that this may * cause us to finish writing all bytes (AKA buf_remaining * goes to 0), hence we have a potential for an interrupt * (PACKET_XFER_COMPLETE is not maskable), but GIC interrupt * is disabled at this point.
*/
buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
tx_fifo_avail -= words_to_transfer;
/* * If there is a partial word at the end of buffer, handle it manually * to prevent reading past the end of buffer, which could cross a page * boundary and fault.
*/ if (tx_fifo_avail > 0 && buf_remaining > 0) { /* * buf_remaining > 3 check not needed as tx_fifo_avail == 0 * when (words_to_transfer was > tx_fifo_avail) earlier * in this function for non-zero words_to_transfer.
*/
/* assemble the trailing 1-3 bytes into one 32-bit FIFO word */
memcpy(&val, buf, buf_remaining);
val = le32_to_cpu(val);
/*
 * NOTE(review): interior of the interrupt service routine -- the function
 * header (where status/status_err are read) and the done label/return are
 * missing from this extraction. Kept byte-identical.
 */
/* error interrupts: record the cause and jump to the masking path */
if (status & status_err) {
tegra_i2c_disable_packet_mode(i2c_dev); if (status & I2C_INT_NO_ACK)
i2c_dev->msg_err |= I2C_ERR_NO_ACK; if (status & I2C_INT_ARBITRATION_LOST)
i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST; goto err;
}
/* * I2C transfer is terminated during the bus clear, so skip * processing the other interrupts.
*/ if (i2c_dev->hw->supports_bus_clear && (status & I2C_INT_BUS_CLR_DONE)) goto err;
/* PIO mode: service the RX/TX FIFO data requests from interrupt context */
if (!i2c_dev->dma_mode) { if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) { if (tegra_i2c_empty_rx_fifo(i2c_dev)) { /* * Overflow error condition: message fully sent, * with no XFER_COMPLETE interrupt but hardware * asks to transfer more.
*/
i2c_dev->msg_err |= I2C_ERR_RX_BUFFER_OVERFLOW; goto err;
}
}
if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) { if (i2c_dev->msg_buf_remaining)
tegra_i2c_fill_tx_fifo(i2c_dev); else
tegra_i2c_mask_irq(i2c_dev,
I2C_INT_TX_FIFO_DATA_REQ);
}
}
/* acknowledge the handled interrupts (DVC has its own status register) */
i2c_writel(i2c_dev, status, I2C_INT_STATUS); if (IS_DVC(i2c_dev))
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
/* * During message read XFER_COMPLETE interrupt is triggered prior to * DMA completion and during message write XFER_COMPLETE interrupt is * triggered after DMA completion. * * PACKETS_XFER_COMPLETE indicates completion of all bytes of transfer, * so forcing msg_buf_remaining to 0 in DMA mode.
*/ if (status & I2C_INT_PACKET_XFER_COMPLETE) { if (i2c_dev->dma_mode)
i2c_dev->msg_buf_remaining = 0; /* * Underflow error condition: XFER_COMPLETE before message * fully sent.
*/ if (WARN_ON_ONCE(i2c_dev->msg_buf_remaining)) {
i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; goto err;
}
complete(&i2c_dev->msg_complete);
} goto done;
err: /* mask all interrupts on error */
tegra_i2c_mask_irq(i2c_dev,
I2C_INT_NO_ACK |
I2C_INT_ARBITRATION_LOST |
I2C_INT_PACKET_XFER_COMPLETE |
I2C_INT_TX_FIFO_DATA_REQ |
I2C_INT_RX_FIFO_DATA_REQ);
if (i2c_dev->hw->supports_bus_clear)
tegra_i2c_mask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE);
/* ack whatever was pending so the error does not re-fire the IRQ */
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
if (IS_DVC(i2c_dev))
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
/* abort any in-flight DMA and wake the waiter on the error path */
if (i2c_dev->dma_mode) {
dmaengine_terminate_async(i2c_dev->dma_chan);
complete(&i2c_dev->dma_complete);
}
/*
 * NOTE(review): body of tegra_i2c_wait_completion() fused with the start of
 * the error-recovery helper (arbitration-loss handling); both function
 * headers are missing from this extraction. Kept byte-identical.
 */
/* atomic transfers poll instead of sleeping on the completion */
if (i2c_dev->atomic_mode) {
ret = tegra_i2c_poll_completion(i2c_dev, complete, timeout_ms);
} else {
enable_irq(i2c_dev->irq);
ret = wait_for_completion_timeout(complete,
msecs_to_jiffies(timeout_ms));
disable_irq(i2c_dev->irq);
/* * Under some rare circumstances (like running KASAN + * NFS root) CPU, which handles interrupt, may stuck in * uninterruptible state for a significant time. In this * case we will get timeout if I2C transfer is running on * a sibling CPU, despite of IRQ being raised. * * In order to handle this rare condition, the IRQ status * needs to be checked after timeout.
*/ if (ret == 0)
ret = tegra_i2c_poll_completion(i2c_dev, complete, 0);
}
/* start recovery upon arbitration loss in single master mode */ if (i2c_dev->msg_err == I2C_ERR_ARBITRATION_LOST) { if (!i2c_dev->multimaster_mode) return i2c_recover_bus(&i2c_dev->adapter);
return -EAGAIN;
}
/*
 * NOTE(review): interior fragments of the NO_ACK handling and of
 * tegra_i2c_xfer_msg()'s DMA completion path -- the surrounding function
 * bodies are missing from this extraction. Kept byte-identical.
 */
/* a NAK is not an error when the caller asked to ignore it */
if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { if (msg->flags & I2C_M_IGNORE_NAK) return 0;
/* * For SMBUS block read command, read only 1 byte in the first transfer. * Adjust that 1 byte for the next transfer in the msg buffer and msg * length.
*/ if (msg->flags & I2C_M_RECV_LEN) { if (end_state == MSG_END_CONTINUE) {
i2c_dev->msg_len = 1;
} else {
i2c_dev->msg_buf += 1;
i2c_dev->msg_len -= 1;
}
}
if (i2c_dev->dma_mode) {
time_left = tegra_i2c_wait_completion(i2c_dev,
&i2c_dev->dma_complete,
xfer_time);
/* * Synchronize DMA first, since dmaengine_terminate_sync() * performs synchronization after the transfer's termination * and we want to get a completion if transfer succeeded.
*/
dmaengine_synchronize(i2c_dev->dma_chan);
dmaengine_terminate_sync(i2c_dev->dma_chan);
/* timed out with no DMA completion: reinit controller and bail */
if (!time_left && !completion_done(&i2c_dev->dma_complete)) {
tegra_i2c_init(i2c_dev); return -ETIMEDOUT;
}
/* copy received data out of the bounce buffer on successful reads */
if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE)
memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, i2c_dev->msg_len);
}
err = tegra_i2c_error_recover(i2c_dev, msg); if (err) return err;
return 0;
}
staticint tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{ struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); int i, ret;
ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) {
dev_err(i2c_dev->dev, "runtime resume failed %d\n", ret);
pm_runtime_put_noidle(i2c_dev->dev); return ret;
}
for (i = 0; i < num; i++) { enum msg_end_type end_type = MSG_END_STOP;
if (i < (num - 1)) { /* check whether follow up message is coming */ if (msgs[i + 1].flags & I2C_M_NOSTART)
end_type = MSG_END_CONTINUE; else
end_type = MSG_END_REPEAT_START;
} /* If M_RECV_LEN use ContinueXfer to read the first byte */ if (msgs[i].flags & I2C_M_RECV_LEN) {
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE); if (ret) break;
/* Validate message length before proceeding */ if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX) break;
/* Set the msg length from first byte */
msgs[i].len += msgs[i].buf[0];
dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);
}
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], end_type); if (ret) break;
}
pm_runtime_put(i2c_dev->dev);
return ret ?: i;
}
/*
 * NOTE(review): the tegra_i2c_xfer_atomic() body is missing -- after its
 * opening line the extraction jumps into fragments of the probe path and of
 * the runtime-PM resume/restore helpers ("staticint" is a garbled
 * "static int"). Kept byte-identical.
 */
staticint tegra_i2c_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{ struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); int ret;
/* probe-path fragment: clocks and DMA are set up before runtime PM */
err = tegra_i2c_init_clocks(i2c_dev); if (err) return err;
err = tegra_i2c_init_dma(i2c_dev); if (err) goto release_clocks;
/* * VI I2C is in VE power domain which is not always ON and not * IRQ-safe. Thus, IRQ-safe device shouldn't be attached to a * non IRQ-safe domain because this prevents powering off the power * domain. * * VI I2C device shouldn't be marked as IRQ-safe because VI I2C won't * be used for atomic transfers. ACPI device is not IRQ safe also.
*/ if (!IS_VI(i2c_dev) && !has_acpi_companion(i2c_dev->dev))
pm_runtime_irq_safe(i2c_dev->dev);
pm_runtime_enable(i2c_dev->dev);
err = tegra_i2c_init_hardware(i2c_dev); if (err) goto release_rpm;
/* runtime-resume fragment: pinctrl and clocks before touching registers */
err = pinctrl_pm_select_default_state(dev); if (err) return err;
err = clk_bulk_enable(i2c_dev->nclocks, i2c_dev->clocks); if (err) return err;
/* * VI I2C device is attached to VE power domain which goes through * power ON/OFF during runtime PM resume/suspend, meaning that * controller needs to be re-initialized after power ON.
*/ if (IS_VI(i2c_dev)) {
err = tegra_i2c_init(i2c_dev); if (err) goto disable_clocks;
}
/* * We need to ensure that clocks are enabled so that registers can be * restored in tegra_i2c_init().
*/
err = tegra_i2c_runtime_resume(dev); if (err) return err;
err = tegra_i2c_init(i2c_dev); if (err) return err;
/* * In case we are runtime suspended, disable clocks again so that we * don't unbalance the clock reference counts during the next runtime * resume transition.
*/ if (pm_runtime_status_suspended(dev)) {
err = tegra_i2c_runtime_suspend(dev); if (err) return err;
}
The information on this web page has been compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.