/* * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive * transfers with low enough latency between the nak/stop phase of the current * command and the start/address phase of the following command that the * interrupts are coalesced by the time we process them.
*/ if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
/* Propagate any stop conditions to the slave implementation. */ if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
/* * Now that we've dealt with any potentially coalesced stop conditions, * address any start conditions.
*/ if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
bus->slave_state = ASPEED_I2C_SLAVE_START;
}
/* * If the slave has been stopped and not started then slave interrupt * handling is complete.
*/ if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE) return irq_handled;
#if IS_ENABLED(CONFIG_I2C_SLAVE) /* * If it's requested in the middle of a slave session, set the master * state to 'pending' then H/W will continue handling this master * command when the bus comes back to the idle state.
*/ if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
bus->master_state = ASPEED_I2C_MASTER_PENDING; return;
} #endif/* CONFIG_I2C_SLAVE */
if (msg->flags & I2C_M_RD) {
command |= ASPEED_I2CD_M_RX_CMD; /* Need to let the hardware know to NACK after RX. */ if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
}
/* * We encountered an interrupt that reports an error: the hardware * should clear the command queue effectively taking us back to the * INACTIVE state.
*/
ret = aspeed_i2c_is_irq_error(irq_status); if (ret) {
dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
irq_status);
irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS); if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
irq_handled = irq_status;
bus->cmd_err = ret;
bus->master_state = ASPEED_I2C_MASTER_INACTIVE; goto out_complete;
}
}
/* Master is not currently active, irq was for someone else. */ if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
bus->master_state == ASPEED_I2C_MASTER_PENDING) goto out_no_complete;
/* We are in an invalid state; reset bus to a known state. */ if (!bus->msgs) {
dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
irq_status);
bus->cmd_err = -EIO; if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
aspeed_i2c_do_stop(bus); goto out_no_complete;
}
msg = &bus->msgs[bus->msgs_index];
/* * START is a special case because we still have to handle a subsequent * TX or RX immediately after we handle it, so we handle it here and * then update the state and handle the new state below.
*/ if (bus->master_state == ASPEED_I2C_MASTER_START) { #if IS_ENABLED(CONFIG_I2C_SLAVE) /* * If a peer master starts a xfer immediately after it queues a * master command, clear the queued master command and change * its state to 'pending'. To simplify handling of pending * cases, it uses S/W solution instead of H/W command queue * handling.
*/ if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
~ASPEED_I2CD_MASTER_CMDS_MASK,
bus->base + ASPEED_I2C_CMD_REG);
bus->master_state = ASPEED_I2C_MASTER_PENDING;
dev_dbg(bus->dev, "master goes pending due to a slave start\n"); goto out_no_complete;
} #endif/* CONFIG_I2C_SLAVE */ if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
bus->cmd_err = -ENXIO;
bus->master_state = ASPEED_I2C_MASTER_INACTIVE; goto out_complete;
}
pr_devel("no slave present at %02x\n", msg->addr);
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
bus->cmd_err = -ENXIO;
aspeed_i2c_do_stop(bus); goto out_no_complete;
}
irq_handled |= ASPEED_I2CD_INTR_TX_ACK; if (msg->len == 0) { /* SMBUS_QUICK */
aspeed_i2c_do_stop(bus); goto out_no_complete;
} if (msg->flags & I2C_M_RD)
bus->master_state = ASPEED_I2C_MASTER_RX_FIRST; else
bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
}
switch (bus->master_state) { case ASPEED_I2C_MASTER_TX: if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
dev_dbg(bus->dev, "slave NACKed TX\n");
irq_handled |= ASPEED_I2CD_INTR_TX_NAK; goto error_and_stop;
} elseif (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
dev_err(bus->dev, "slave failed to ACK TX\n"); goto error_and_stop;
}
irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
fallthrough; case ASPEED_I2C_MASTER_TX_FIRST: if (bus->buf_index < msg->len) {
bus->master_state = ASPEED_I2C_MASTER_TX;
writel(msg->buf[bus->buf_index++],
bus->base + ASPEED_I2C_BYTE_BUF_REG);
writel(ASPEED_I2CD_M_TX_CMD,
bus->base + ASPEED_I2C_CMD_REG);
} else {
aspeed_i2c_next_msg_or_stop(bus);
} goto out_no_complete; case ASPEED_I2C_MASTER_RX_FIRST: /* RX may not have completed yet (only address cycle) */ if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE)) goto out_no_complete;
fallthrough; case ASPEED_I2C_MASTER_RX: if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
dev_err(bus->dev, "master failed to RX\n"); goto error_and_stop;
}
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
#if IS_ENABLED(CONFIG_I2C_SLAVE) /* * In most cases, interrupt bits will be set one by one, although * multiple interrupt bits could be set at the same time. It's also * possible that master interrupt bits could be set along with slave * interrupt bits. Each case needs to be handled using corresponding * handlers depending on the current state.
*/ if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
bus->master_state != ASPEED_I2C_MASTER_PENDING) {
irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
irq_remaining &= ~irq_handled; if (irq_remaining)
irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
} else {
irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
irq_remaining &= ~irq_handled; if (irq_remaining)
irq_handled |= aspeed_i2c_master_irq(bus,
irq_remaining);
}
/* * Start a pending master command at here if a slave operation is * completed.
*/ if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
aspeed_i2c_do_start(bus); #else
irq_handled = aspeed_i2c_master_irq(bus, irq_remaining); #endif/* CONFIG_I2C_SLAVE */
irq_remaining &= ~irq_handled; if (irq_remaining)
dev_err(bus->dev, "irq handled != irq. expected 0x%08x, but was 0x%08x\n",
irq_received, irq_handled);
/* If bus is busy in a single master environment, attempt recovery. */ if (!bus->multi_master &&
(readl(bus->base + ASPEED_I2C_CMD_REG) &
ASPEED_I2CD_BUS_BUSY_STS)) { int ret;
spin_unlock_irqrestore(&bus->lock, flags);
ret = aspeed_i2c_recover_bus(bus); if (ret) return ret;
spin_lock_irqsave(&bus->lock, flags);
}
if (time_left == 0) { /* * In a multi-master setup, if a timeout occurs, attempt * recovery. But if the bus is idle, we still need to reset the * i2c controller to clear the remaining interrupts.
*/ if (bus->multi_master &&
(readl(bus->base + ASPEED_I2C_CMD_REG) &
ASPEED_I2CD_BUS_BUSY_STS))
aspeed_i2c_recover_bus(bus); else
aspeed_i2c_reset(bus);
/* * If timed out and the state is still pending, drop the pending * master command.
*/
spin_lock_irqsave(&bus->lock, flags); if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
spin_unlock_irqrestore(&bus->lock, flags);
#if IS_ENABLED(CONFIG_I2C_SLAVE) /* precondition: bus.lock has been acquired. */ staticvoid __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
u32 addr_reg_val, func_ctrl_reg_val;
/* * Set slave addr. Reserved bits can all safely be written with zeros * on all of ast2[456]00, so zero everything else to ensure we only * enable a single slave address (ast2500 has two, ast2600 has three, * the enable bits for which are also in this register) so that we don't * end up with additional phantom devices responding on the bus.
*/
addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);
/* * SCL_high and SCL_low represent a value 1 greater than what is stored * since a zero divider is meaningless. Thus, the max value each can * store is every bit set + 1. Since SCL_high and SCL_low are added * together (see below), the max value of both is the max value of one * them times two.
*/
clk_high_low_max = (clk_high_low_mask + 1) * 2;
/* * The actual clock frequency of SCL is: * SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low)) * = APB_freq / divisor * where base_freq is a programmable clock divider; its value is * base_freq = 1 << base_clk_divisor * SCL_high is the number of base_freq clock cycles that SCL stays high * and SCL_low is the number of base_freq clock cycles that SCL stays * low for a period of SCL. * The actual register has a minimum SCL_high and SCL_low minimum of 1; * thus, they start counting at zero. So * SCL_high = clk_high + 1 * SCL_low = clk_low + 1 * Thus, * SCL_freq = APB_freq / * ((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1)) * The documentation recommends clk_high >= clk_high_max / 2 and * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint * gives us the following solution:
*/
base_clk_divisor = divisor > clk_high_low_max ?
ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;
/*
 * Derive the AST24xx clock divider register value for the requested
 * divisor.
 *
 * On this generation the clk_high and clk_low fields are 3 bits wide.
 * The hardware interprets each stored field as (value + 1) cycles, so a
 * single field spans up to 8 base clock cycles and the two together give
 * a clk_high_low_max of 16.
 */
static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
}
/*
 * Derive the AST25xx clock divider register value for the requested
 * divisor.
 *
 * On this generation the clk_high and clk_low fields are 4 bits wide.
 * The hardware interprets each stored field as (value + 1) cycles, so a
 * single field spans up to 16 base clock cycles and the two together
 * give a clk_high_low_max of 32.
 */
static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
}
/* precondition: bus.lock has been acquired. */ staticint aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
u32 divisor, clk_reg_val;
#if IS_ENABLED(CONFIG_I2C_SLAVE) /* If slave has already been registered, re-enable it. */ if (bus->slave)
__aspeed_i2c_reg_slave(bus, bus->slave->addr); #endif/* CONFIG_I2C_SLAVE */
/* Set interrupt generation of I2C controller */
writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);
bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); if (!bus) return -ENOMEM;
bus->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(bus->base)) return PTR_ERR(bus->base);
parent_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(parent_clk)) return PTR_ERR(parent_clk);
bus->parent_clk_frequency = clk_get_rate(parent_clk); /* We just need the clock rate, we don't actually use the clk object. */
devm_clk_put(&pdev->dev, parent_clk);
bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL); if (IS_ERR(bus->rst)) {
dev_err(&pdev->dev, "missing or invalid reset controller device tree entry\n"); return PTR_ERR(bus->rst);
}
reset_control_deassert(bus->rst);
ret = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &bus->bus_frequency); if (ret < 0) {
dev_err(&pdev->dev, "Could not read bus-frequency property\n");
bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
}
match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node); if (!match)
bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val; else
bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
match->data;
/* Clean up any left over interrupt state. */
writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG); /* * bus.lock does not need to be held because the interrupt handler has * not been enabled yet.
*/
ret = aspeed_i2c_init(bus, pdev); if (ret < 0) return ret;
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
0, dev_name(&pdev->dev), bus); if (ret < 0) return ret;
ret = i2c_add_adapter(&bus->adap); if (ret < 0) return ret;
platform_set_drvdata(pdev, bus);
dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
bus->adap.nr, irq);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.