// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 * - JMicron (hardware and technical support)
 */
/*
 * Enable SDHCI v4 mode on this host.
 *
 * A vendor host-controller driver may call this before sdhci_add_host()
 * to switch the core into v4 mode when the controller supports it. The
 * flag is recorded first so that sdhci_do_enable_v4_mode() and any later
 * code paths see a consistent host->v4_mode.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
/*
 * NOTE(review): fragment — the start of sdhci_reset() (signature, 'timeout'
 * declaration and the write that triggers SDHCI_SOFTWARE_RESET) is not
 * visible in this chunk; the comments below annotate only what is shown.
 */
if (mask & SDHCI_RESET_ALL) {
/* A full reset drops SD bus power, so the cached clock value is stale. */
host->clock = 0; /* Reset-all turns off SD Bus Power */ if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_off(host);
}
/* Wait max 100 ms */
timeout = ktime_add_ms(ktime_get(), 100);
/* hw clears the bit when it's done */ while (1) { bool timedout = ktime_after(ktime_get(), timeout);
/*
 * 'timedout' is sampled before the register read, so at least one more
 * poll happens after the deadline passes before giving up.
 */
if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) break; if (timedout) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
/* Record the timeout in error stats and dump registers for debugging. */
sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host); return;
}
/* Brief back-off between polls. */
udelay(10);
}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
/*
 * NOTE(review): fragment — the enclosing function header (a reset-reason
 * dispatcher, judging by the 'reason' switch over SDHCI_RESET_FOR_* values)
 * is not visible in this chunk.
 */
switch (reason) { case SDHCI_RESET_FOR_INIT:
/* Initialization: reset command and data circuits in a single operation. */
sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); break; case SDHCI_RESET_FOR_REQUEST_ERROR: case SDHCI_RESET_FOR_TUNING_ABORT: case SDHCI_RESET_FOR_CARD_REMOVED: case SDHCI_RESET_FOR_CQE_RECOVERY:
/* Error/abort recovery: reset CMD first, then DATA, as two separate resets. */
sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA); break; case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
sdhci_do_reset(host, SDHCI_RESET_DATA); break;
}
}
/* * Always adjust the DMA selection as some controllers * (e.g. JMicron) can't do PIO properly when the selection * is ADMA.
*/
ctrl &= ~SDHCI_CTRL_DMA_MASK; if (!(host->flags & SDHCI_REQ_USE_DMA)) goto out;
/* Note if DMA Select is zero then SDMA is selected */ if (host->flags & SDHCI_USE_ADMA)
ctrl |= SDHCI_CTRL_ADMA32;
if (host->flags & SDHCI_USE_64_BIT_DMA) { /* * If v4 mode, all supported DMA can be 64-bit addressing if * controller supports 64-bit system address, otherwise only * ADMA can support 64-bit addressing.
*/ if (host->v4_mode) {
ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
} elseif (host->flags & SDHCI_USE_ADMA) { /* * Don't need to undo SDHCI_CTRL_ADMA32 in order to * set SDHCI_CTRL_ADMA64.
*/
ctrl |= SDHCI_CTRL_ADMA64;
}
}
/* * A change to the card detect bits indicates a change in present state, * refer sdhci_set_card_detection(). A card detect interrupt might have * been missed while the host controller was being reset, so trigger a * rescan to check.
*/ if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
/* * Some controllers (JMicron JMB38x) mess up the buffer bits * for transfers < 4 bytes. As long as it is just one block, * we can ignore the bits.
*/ if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
(host->data->blocks == 1))
mask = ~0;
while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
udelay(100);
if (host->data->flags & MMC_DATA_READ)
sdhci_read_block_pio(host); else
sdhci_write_block_pio(host);
host->blocks--; if (host->blocks == 0) break;
}
DBG("PIO transfer complete.\n");
}
staticint sdhci_pre_dma_transfer(struct sdhci_host *host, struct mmc_data *data, int cookie)
{ int sg_count;
/* * If the data buffers are already mapped, return the previous * dma_map_sg() result.
*/ if (data->host_cookie == COOKIE_PRE_MAPPED) return data->sg_count;
/* Bounce write requests to the bounce buffer */ if (host->bounce_buffer) { unsignedint length = data->blksz * data->blocks;
if (length > host->bounce_buffer_size) {
pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
mmc_hostname(host->mmc), length,
host->bounce_buffer_size); return -EIO;
} if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { /* Copy the data to the bounce buffer */ if (host->ops->copy_to_bounce_buffer) {
host->ops->copy_to_bounce_buffer(host,
data, length);
} else {
sg_copy_to_buffer(data->sg, data->sg_len,
host->bounce_buffer, length);
}
} /* Switch ownership to the DMA */
dma_sync_single_for_device(mmc_dev(host->mmc),
host->bounce_addr,
host->bounce_buffer_size,
mmc_get_dma_dir(data)); /* Just a dummy value */
sg_count = 1;
} else { /* Just access the data directly from memory */
sg_count = dma_map_sg(mmc_dev(host->mmc),
data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
/* 32-bit and 64-bit descriptors have these members in same position */
dma_desc->cmd = cpu_to_le16(cmd);
dma_desc->len = cpu_to_le16(len);
dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
if (host->flags & SDHCI_USE_64_BIT_DMA)
dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
/* * The SDHCI specification states that ADMA addresses must * be 32-bit aligned. If they aren't, then we use a bounce * buffer for the (up to three) bytes that screw up the * alignment.
*/
offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
SDHCI_ADMA2_MASK; if (offset) { if (data->flags & MMC_DATA_WRITE) {
buffer = sdhci_kmap_atomic(sg);
memcpy(align, buffer, offset);
sdhci_kunmap_atomic(buffer);
}
/* * The block layer forces a minimum segment size of PAGE_SIZE, * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write * multiple descriptors, noting that the ADMA table is sized * for 4KiB chunks anyway, so it will be big enough.
*/ while (len > host->max_adma) { int n = 32 * 1024; /* 32KiB*/
__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
addr += n;
len -= n;
}
/* * If this triggers then we have a calculation bug * somewhere. :/
*/
WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
}
if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { /* Mark the last descriptor as the terminating descriptor */ if (desc != host->adma_table) {
desc -= host->desc_sz;
sdhci_adma_mark_end(desc);
}
} else { /* Add a terminating entry - nop, end, valid */
__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
}
}
staticvoid sdhci_adma_table_post(struct sdhci_host *host, struct mmc_data *data)
{ struct scatterlist *sg; int i, size; void *align; char *buffer;
if (data->flags & MMC_DATA_READ) { bool has_unaligned = false;
/* Do a quick scan of the SG list for any unaligned mappings */
for_each_sg(data->sg, sg, host->sg_count, i) if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
has_unaligned = true; break;
}
if (has_unaligned) {
dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
data->sg_len, DMA_FROM_DEVICE);
/* timeout in us */ if (!data) {
target_timeout = cmd->busy_timeout * 1000;
} else {
target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); if (host->clock && data->timeout_clks) { unsignedlonglong val;
/* * data->timeout_clks is in units of clock cycles. * host->clock is in Hz. target_timeout is in us. * Hence, us = 1000000 * cycles / Hz. Round up.
*/
val = 1000000ULL * data->timeout_clks; if (do_div(val, host->clock))
target_timeout++;
target_timeout += val;
}
}
/* * If the host controller provides us with an incorrect timeout * value, just skip the check and use the maximum. The hardware may take * longer to time out, but that's much better than having a too-short * timeout value.
*/ if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) return host->max_timeout_count;
/* Unspecified command, assume max */ if (cmd == NULL) return host->max_timeout_count;
data = cmd->data; /* Unspecified timeout, assume max */ if (!data && !cmd->busy_timeout) return host->max_timeout_count;
/* timeout in us */
target_timeout = sdhci_target_timeout(host, cmd, data);
/* * Figure out needed cycles. * We do this in steps in order to fit inside a 32 bit int. * The first step is the minimum timeout, which will have a * minimum resolution of 6 bits: * (1) 2^13*1000 > 2^22, * (2) host->timeout_clk < 2^16 * => * (1) / (2) > 2^6
*/
count = 0;
current_timeout = (1 << 13) * 1000 / host->timeout_clk; while (current_timeout < target_timeout) {
count++;
current_timeout <<= 1; if (count > host->max_timeout_count) { if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
DBG("Too large timeout 0x%x requested for CMD%d!\n",
count, cmd->opcode);
count = host->max_timeout_count;
*too_big = true; break;
}
}
/*
 * Program the block size and block count registers for a data transfer.
 *
 * @host: SDHCI host to program.
 * @data: MMC data descriptor supplying blksz and blocks.
 *
 * SDHCI_BLOCK_SIZE is written with the SDMA buffer boundary combined with
 * the block size. For Version 4.10+ hosts running in v4 mode with the
 * SDHCI_QUIRK2_USE_32BIT_BLK_CNT quirk, the count goes into the 32-bit
 * block count register and the 16-bit register must read zero, so it is
 * cleared first if it holds a stale value; otherwise the classic 16-bit
 * block count register is used.
 *
 * Fixes extraction-garbled declarator "staticinlinevoid" (compile error).
 */
static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary value and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);

	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}
/* * FIXME: This doesn't account for merging when mapping the * scatterlist. * * The assumption here being that alignment and lengths are * the same after DMA mapping to device address space.
*/
length_mask = 0;
offset_mask = 0; if (host->flags & SDHCI_USE_ADMA) { if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
length_mask = 3; /* * As we use up to 3 byte chunks to work * around alignment problems, we need to * check the offset as well.
*/
offset_mask = 3;
}
} else { if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
length_mask = 3; if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
offset_mask = 3;
}
if (unlikely(length_mask | offset_mask)) {
for_each_sg(data->sg, sg, data->sg_len, i) { if (sg->length & length_mask) {
DBG("Reverting to PIO because of transfer size (%d)\n",
sg->length);
host->flags &= ~SDHCI_REQ_USE_DMA; break;
} if (sg->offset & offset_mask) {
DBG("Reverting to PIO because of bad alignment\n");
host->flags &= ~SDHCI_REQ_USE_DMA; break;
}
}
}
}
sdhci_config_dma(host);
if (host->flags & SDHCI_REQ_USE_DMA) { int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
if (sg_cnt <= 0) { /* * This only happens when someone fed * us an invalid request.
*/
WARN_ON(1);
host->flags &= ~SDHCI_REQ_USE_DMA;
} elseif (host->flags & SDHCI_USE_ADMA) {
sdhci_adma_table_pre(host, data, sg_cnt);
sdhci_set_adma_addr(host, host->adma_addr);
} else {
WARN_ON(sg_cnt != 1);
sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
}
}
if (!(host->flags & SDHCI_REQ_USE_DMA)) { int flags;
/* Sanity check: all the SG entries must be aligned by block size. */ for (i = 0; i < data->sg_len; i++) { if ((data->sg + i)->length % data->blksz) return -EINVAL;
}
chan = sdhci_external_dma_channel(host, data);
ret = dmaengine_slave_config(chan, &cfg); if (ret) return ret;
/* * In case of Version 4.10 or later, use of 'Auto CMD Auto * Select' is recommended rather than use of 'Auto CMD12 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode * here because some controllers (e.g sdhci-of-dwmshc) expect it.
*/ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
(use_cmd12 || use_cmd23)) {
*mode |= SDHCI_TRNS_AUTO_SEL;
/* * If we are sending CMD23, CMD12 never gets sent * on successful completion (so no Auto-CMD12).
*/ if (use_cmd12)
*mode |= SDHCI_TRNS_AUTO_CMD12; elseif (use_cmd23)
*mode |= SDHCI_TRNS_AUTO_CMD23;
}
if (data == NULL) { if (host->quirks2 &
SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { /* must not clear SDHCI_TRANSFER_MODE when tuning */ if (!mmc_op_tuning(cmd->opcode))
sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
} else { /* clear Auto CMD settings for no data CMDs */
mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
} return;
}
WARN_ON(!host->data);
if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
mode = SDHCI_TRNS_BLK_CNT_EN;
/* * The specification states that the block count register must * be updated, but it does not specify at what point in the * data flow. That makes the register entirely useless to read * back so we have to assume that nothing made it to the card * in the event of an error.
*/ if (data->error)
data->bytes_xfered = 0; else
data->bytes_xfered = data->blksz * data->blocks;
}
EXPORT_SYMBOL_GPL(__sdhci_finish_data_common);
/* * Need to send CMD12 if - * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) * b) error in multiblock transfer
*/ if (data->stop &&
((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
data->error)) { /* * 'cap_cmd_during_tfr' request must not use the command line * after mmc_command_done() has been called. It is upper layer's * responsibility to send the stop command if required.
*/ if (data->mrq->cap_cmd_during_tfr) {
__sdhci_finish_mrq(host, data->mrq);
} else { /* Avoid triggering warning in sdhci_send_command() */
host->cmd = NULL; if (!sdhci_send_command(host, data->stop)) { if (sw_data_timeout) { /* * This is anyway a sw data timeout, so * give up now.
*/
data->stop->error = -EIO;
__sdhci_finish_mrq(host, data->mrq);
} else {
WARN_ON(host->deferred_cmd);
host->deferred_cmd = data->stop;
}
}
}
} else {
__sdhci_finish_mrq(host, data->mrq);
}
}
mask = SDHCI_CMD_INHIBIT; if (sdhci_data_line_cmd(cmd))
mask |= SDHCI_DATA_INHIBIT;
/* We shouldn't wait for data inihibit for stop commands, even
though they might use busy signaling */ if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
mask &= ~SDHCI_DATA_INHIBIT;
if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) returnfalse;
if (cmd->data) { if (host->use_external_dma)
sdhci_external_dma_prepare_data(host, cmd); else
sdhci_prepare_data(host, cmd);
}
sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
sdhci_set_transfer_mode(host, cmd);
if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
WARN_ONCE(1, "Unsupported response type!\n"); /* * This does not happen in practice because 136-bit response * commands never have busy waiting, so rather than complicate * the error path, just remove busy waiting and continue.
*/
cmd->flags &= ~MMC_RSP_BUSY;
}
while (!sdhci_send_command(host, cmd)) { if (!timeout--) {
pr_err("%s: Controller never released inhibit bit(s).\n",
mmc_hostname(host->mmc));
sdhci_err_stats_inc(host, CTRL_TIMEOUT);
sdhci_dumpregs(host);
cmd->error = -EIO; returnfalse;
}
spin_unlock_irqrestore(&host->lock, flags);
usleep_range(1000, 1250);
present = host->mmc->ops->get_cd(host->mmc);
spin_lock_irqsave(&host->lock, flags);
/* A deferred command might disappear, handle that */ if (cmd == deferred_cmd && cmd != host->deferred_cmd) returntrue;
if (sdhci_present_error(host, cmd, present)) returnfalse;
}
if (cmd == host->deferred_cmd)
host->deferred_cmd = NULL;
returntrue;
}
/*
 * Read a 136-bit (R2) command response from the controller.
 *
 * @host: SDHCI host to read from.
 * @cmd:  command whose resp[0..3] words are filled in.
 *
 * The four 32-bit RESPONSE registers are read highest offset first, so
 * resp[0] receives the register at SDHCI_RESPONSE + 12. Most controllers
 * strip the response CRC byte, so each word is then shifted left by one
 * byte, carrying the top byte of the following word across the boundary.
 * Controllers that keep the CRC set SDHCI_QUIRK2_RSP_136_HAS_CRC and the
 * shift is skipped.
 *
 * Fixes extraction-garbled declarator "staticvoid" (compile error).
 */
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}
if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
mmc_command_done(host->mmc, cmd->mrq);
/* * The host can send and interrupt when the busy state has * ended, allowing us to wait without wasting CPU cycles. * The busy signal uses DAT0 so this is similar to waiting * for data to complete. * * Note: The 1.0 specification is a bit ambiguous about this * feature so there might be some problems with older * controllers.
*/ if (cmd->flags & MMC_RSP_BUSY) { if (cmd->data) {
DBG("Cannot wait for busy signal when also doing a data transfer");
} elseif (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
cmd == host->data_cmd) { /* Command complete before busy is ended */ return;
}
}
/* Finished CMD23, now send actual command. */ if (cmd == cmd->mrq->sbc) { if (!sdhci_send_command(host, cmd->mrq->cmd)) {
WARN_ON(host->deferred_cmd);
host->deferred_cmd = cmd->mrq->cmd;
}
} else {
/* Processed actual command. */ if (host->data && host->data_early)
sdhci_finish_data(host);
if (!cmd->data)
__sdhci_finish_mrq(host, cmd->mrq);
}
}
/* * Check if the Host Controller supports Programmable Clock * Mode.
*/ if (host->clk_mul) { for (div = 1; div <= 1024; div++) { if ((host->max_clk * host->clk_mul / div)
<= clock) break;
} if ((host->max_clk * host->clk_mul / div) <= clock) { /* * Set Programmable Clock Mode in the Clock * Control register.
*/
clk = SDHCI_PROG_CLOCK_MODE;
real_div = div;
clk_mul = host->clk_mul;
div--;
} else { /* * Divisor can be too small to reach clock * speed requirement. Then use the base clock.
*/
switch_base_clk = true;
}
}
if (!host->clk_mul || switch_base_clk) { /* Version 3.00 divisors must be a multiple of 2. */ if (host->max_clk <= clock)
div = 1; else { for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
div += 2) { if ((host->max_clk / div) <= clock) break;
}
}
real_div = div;
div >>= 1; if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
&& !div && host->max_clk <= 25000000)
div = 1;
}
} else { /* Version 2.00 divisors must be a power of 2. */ for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { if ((host->max_clk / div) <= clock) break;
}
real_div = div;
div >>= 1;
}
unsignedshort sdhci_get_vdd_value(unsignedshort vdd)
{ switch (1 << vdd) { case MMC_VDD_165_195: /* * Without a regulator, SDHCI does not support 2.0v * so we only get here if the driver deliberately * added the 2.0v range to ocr_avail. Map it to 1.8v * for the purpose of turning on the power.
*/ case MMC_VDD_20_21: return SDHCI_POWER_180; case MMC_VDD_29_30: case MMC_VDD_30_31: return SDHCI_POWER_300; case MMC_VDD_32_33: case MMC_VDD_33_34: /* * 3.4V ~ 3.6V are valid only for those platforms where it's * known that the voltage range is supported by hardware.
*/ case MMC_VDD_34_35: case MMC_VDD_35_36: return SDHCI_POWER_330; default: return 0;
}
}
EXPORT_SYMBOL_GPL(sdhci_get_vdd_value);
if (mode != MMC_POWER_OFF) {
pwr = sdhci_get_vdd_value(vdd); if (!pwr) {
WARN(1, "%s: Invalid vdd %#x\n",
mmc_hostname(host->mmc), vdd);
}
}
if (host->pwr == pwr) return;
host->pwr = pwr;
if (pwr == 0) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_off(host);
} else { /* * Spec says that we should clear the power reg before setting * a new value. Some controllers don't seem to like this though.
*/ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
/* * At least the Marvell CaFe chip gets confused if we set the * voltage and set turn on power at the same time, so set the * voltage first.
*/ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_on(host);
/* * Some controllers need an extra 10ms delay of 10ms before * they can apply clock after applying power
*/ if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
mdelay(10);
}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
/* * Some controllers need to configure a valid bus voltage on their power * register regardless of whether an external regulator is taking care of power * supply. This helper function takes care of it if set as the controller's * sdhci_ops.set_power callback.
*/ void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, unsignedchar mode, unsignedshort vdd)
{ if (!IS_ERR(host->mmc->supply.vmmc)) { struct mmc_host *mmc = host->mmc;
/* * The HSQ may send a command in interrupt context without polling * the busy signaling, which means we should return BUSY if controller * has not released inhibit bits to allow HSQ trying to send request * again in non-atomic context. So we should not finish this request * here.
*/ if (!sdhci_send_command(host, cmd))
ret = -EBUSY; else
sdhci_led_activate(host);
staticbool sdhci_timing_has_preset(unsignedchar timing)
{ switch (timing) { case MMC_TIMING_UHS_SDR12: case MMC_TIMING_UHS_SDR25: case MMC_TIMING_UHS_SDR50: case MMC_TIMING_UHS_SDR104: case MMC_TIMING_UHS_DDR50: case MMC_TIMING_MMC_DDR52: returntrue;
} returnfalse;
}
/* * Reset the chip on each power off. * Should clear out any weird states.
*/ if (ios->power_mode == MMC_POWER_OFF) {
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
sdhci_reinit(host);
}
if (host->ops->set_power)
host->ops->set_power(host, ios->power_mode, ios->vdd); else
sdhci_set_power(host, ios->power_mode, ios->vdd);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
host->ops->set_bus_width(host, ios->bus_width);
/* * Special case to avoid multiple clock changes during voltage * switching.
*/ if (!reinit_uhs &&
turning_on_clk &&
host->timing == ios->timing &&
host->version >= SDHCI_SPEC_300 &&
!sdhci_presetable_values_change(host, ios)) return;
if (host->version >= SDHCI_SPEC_300) {
u16 clk, ctrl_2;
/* * According to SDHCI Spec v3.00, if the Preset Value * Enable in the Host Control 2 register is set, we * need to reset SD Clock Enable before changing High * Speed Enable to avoid generating clock glitches.
*/
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); if (clk & SDHCI_CLOCK_CARD_EN) {
clk &= ~SDHCI_CLOCK_CARD_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
if (!host->preset_enabled) { /* * We only need to set Driver Strength if the * preset value enable is not set.
*/
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; elseif (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; elseif (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; elseif (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; else {
pr_warn("%s: invalid driver type, default to driver type B\n",
mmc_hostname(mmc));
ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
}
/*
 * NOTE(review): the following text is extraction residue (a German web-page
 * disclaimer) and is not part of the driver source. It is preserved verbatim
 * inside a comment so the file remains valid C. Translation: "The information
 * on this web page was compiled carefully to the best of our knowledge.
 * However, neither completeness, correctness, nor quality of the provided
 * information is guaranteed. Remark: the colored syntax highlighting and the
 * measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */