/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */

#define SD_DISCARD_TIMEOUT_MS	(250)
/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq, because of two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}
#ifdef CONFIG_FAIL_MMC_REQUEST
/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;
	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
}
if (host->ongoing_mrq == mrq)
host->ongoing_mrq = NULL;
mmc_complete_cmd(mrq);
trace_mmc_request_done(host, mrq);
	/*
	 * We list various conditions for the command to be considered
	 * properly done:
	 *
	 * - There was no error, OK fine then
	 * - We are not doing some kind of retry
	 * - The card was removed (...so just complete everything no matter
	 *   if there are errors or retries)
	 */
	if (!err || !cmd->retries || mmc_card_removed(host->card)) {
mmc_should_fail_request(host, mrq);
if (!host->ongoing_mrq)
led_trigger_event(host->led, LED_OFF);
if (mrq->stop) {
pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
mmc_hostname(host), mrq->stop->opcode,
mrq->stop->error,
mrq->stop->resp[0], mrq->stop->resp[1],
mrq->stop->resp[2], mrq->stop->resp[3]);
}
	}

	/*
	 * Request starter must handle retries - see
	 * mmc_wait_for_req_done().
	 */
	if (mrq->done)
mrq->done(mrq);
}
EXPORT_SYMBOL(mmc_request_done);
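
/*
 * Illustrative sketch (example only, not part of this file): how a host
 * controller driver might hand a completed request back to the core from its
 * interrupt handler via mmc_request_done(). The foo_* names, the foo_host
 * structure and the FOO_STAT_CRC_ERR bit are hypothetical; real drivers read
 * their own status registers and map them to the core's error codes.
 */
#if 0	/* example only */
static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_host *foo = dev_id;
	struct mmc_request *mrq = foo->mrq;

	if (!mrq)
		return IRQ_NONE;

	/* Translate hardware status into the core's error convention */
	if (foo_read_status(foo) & FOO_STAT_CRC_ERR)
		mrq->cmd->error = -EILSEQ;

	foo->mrq = NULL;
	mmc_request_done(foo->mmc, mrq);	/* hand the request back to the core */
	return IRQ_HANDLED;
}
#endif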
static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
}
	/*
	 * For sdio rw commands we must wait for card busy otherwise some
	 * sdio devices won't work properly.
	 * And bypass I/O abort, reset and bus suspend operations.
	 */
	if (sdio_is_io_busy(mrq->cmd->opcode, mrq->cmd->arg) &&
	    host->ops->card_busy) {
		int tries = 500; /* Wait approx. 500 ms at maximum */
while (host->ops->card_busy(host) && --tries)
mmc_delay(1);
	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * Retry path could come through here without having waited on
		 * cmd_completion, so ensure it is reinitialised.
		 */
reinit_completion(&mrq->cmd_completion);
}
	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
wait_for_completion(&ongoing_mrq->cmd_completion);
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;
/*
 * mmc_cqe_start_req - Start a CQE request.
 * @host: MMC host to start the request
 * @mrq: request to start
 *
 * Start the request, re-tuning if needed and it is possible. Returns an error
 * code if the request fails to start or -EBUSY if CQE is busy.
 */
int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;
	/*
	 * CQE cannot process re-tuning commands. Caller must hold retuning
	 * while CQE is in use. Re-tuning can happen here only when CQE has no
	 * active requests i.e. this is the first. Note, re-tuning will call
	 * ->cqe_off().
	 */
	err = mmc_retune(host);
	if (err)
		goto out_err;
mrq->host = host;
mmc_mrq_pr_debug(host, mrq, true);
	err = mmc_mrq_prep(host, mrq);
	if (err)
		goto out_err;
if (host->uhs2_sd_tran)
mmc_uhs2_prepare_cmd(host, mrq);
	err = host->cqe_ops->cqe_request(host, mrq);
	if (err)
		goto out_err;
trace_mmc_request_start(host, mrq);
return 0;
out_err:
	if (mrq->cmd) {
pr_debug("%s: failed to start CQE direct CMD%u, error %d\n",
mmc_hostname(host), mrq->cmd->opcode, err);
} else {
pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n",
mmc_hostname(host), mrq->tag, err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_cqe_start_req);
/**
 *	mmc_cqe_request_done - CQE has finished processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	CQE drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
mmc_should_fail_request(host, mrq);
	/* Flag re-tuning needed on CRC errors */
	if ((mrq->cmd && mrq->cmd->error == -EILSEQ) ||
(mrq->data && mrq->data->error == -EILSEQ))
mmc_retune_needed(host);
trace_mmc_request_done(host, mrq);
if (mrq->cmd) {
pr_debug("%s: CQE req done (direct CMD%u): %d\n",
mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error);
} else {
pr_debug("%s: CQE transfer done tag %d\n",
mmc_hostname(host), mrq->tag);
}
/**
 *	mmc_cqe_post_req - CQE post process of a completed MMC request
 *	@host: MMC host
 *	@mrq: MMC request to be processed
 */
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->cqe_ops->cqe_post_req)
host->cqe_ops->cqe_post_req(host, mrq);
}
EXPORT_SYMBOL(mmc_cqe_post_req);
/* Arbitrary 1 second timeout */
#define MMC_CQE_RECOVERY_TIMEOUT	1000

/*
 * mmc_cqe_recovery - Recover from CQE errors.
 * @host: MMC host to recover
 *
 * Recovery consists of stopping CQE, stopping eMMC, discarding the queue
 * in eMMC, and discarding the queue in CQE. CQE must call
 * mmc_cqe_request_done() on all requests. An error is returned if the eMMC
 * fails to discard its queue.
 */
int mmc_cqe_recovery(struct mmc_host *host)
{
	struct mmc_command cmd;
	int err;
mmc_retune_hold_now(host);
	/*
	 * Recovery is expected seldom, if at all, but it reduces performance,
	 * so make sure it is not completely silent.
	 */
pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
if (err)
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
mmc_retune_release(host);
return err;
}
EXPORT_SYMBOL(mmc_cqe_recovery);
/**
 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 *	@host: MMC host
 *	@mrq: MMC request
 *
 *	mmc_is_req_done() is used with requests that have
 *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 *	starting a request and before waiting for it to complete. That is,
 *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 *	and before mmc_wait_for_req_done(). If it is called at other times the
 *	result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
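
/*
 * Illustrative sketch (example only): the polling pattern described above for
 * 'cap_cmd_during_tfr' requests - start the request, do other work while the
 * transfer is ongoing, poll with mmc_is_req_done(), then wait for completion
 * with mmc_wait_for_req_done(). The do_other_work() helper is hypothetical.
 */
#if 0	/* example only */
static void foo_transfer(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->cap_cmd_during_tfr = true;
	mmc_wait_for_req(host, mrq);	/* returns once the command phase is done */

	while (!mmc_is_req_done(host, mrq))
		do_other_work();	/* e.g. issue commands not using the data lines */

	mmc_wait_for_req_done(host, mrq);	/* wait for the data transfer too */
}
#endif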
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. In the case of 'cap_cmd_during_tfr'
 *	requests, the transfer is ongoing and the caller can issue further
 *	commands that do not use the data lines, and then wait by calling
 *	mmc_wait_for_req_done().
 *	Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
__mmc_start_req(host, mrq);
if (!mrq->cap_cmd_during_tfr)
mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete. Return any error that occurred while the command
 *	was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
		     int retries)
{
	struct mmc_request mrq = {};
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;
	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
}
	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
mult = mmc_card_sd(card) ? 100 : 10;
	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
mult <<= card->csd.r2w_factor;
	if (data->flags & MMC_DATA_WRITE)
		/*
		 * The MMC spec says "It is strongly recommended
		 * for hosts to implement more than 500ms
		 * timeout value even if the card indicates
		 * the 250ms maximum busy length." Even the
		 * previous value of 300ms is known to be
		 * insufficient for some cards.
		 */
		limit_us = 3000000;
	else
		limit_us = 100000;
	/*
	 * SDHC cards always use these fixed values.
	 */
	if (timeout_us > limit_us) {
data->timeout_ns = limit_us * 1000;
data->timeout_clks = 0;
}
	/* assign limit value if invalid */
	if (timeout_us == 0)
data->timeout_ns = limit_us * 1000;
}
	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
data->timeout_ns = 600000000;
data->timeout_clks = 0;
}
	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
}
}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
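
/*
 * Illustrative sketch (example only): how a caller typically fills in a data
 * request and lets mmc_set_data_timeout() pick timeout_ns/timeout_clks based
 * on the card type, as described above. Scatterlist initialisation is elided;
 * foo_prepare_read() is hypothetical.
 */
#if 0	/* example only */
static void foo_prepare_read(struct mmc_card *card, struct mmc_data *data,
			     struct scatterlist *sg, unsigned int blocks)
{
	data->blksz = 512;
	data->blocks = blocks;
	data->flags = MMC_DATA_READ;
	data->sg = sg;
	data->sg_len = 1;

	/* Fills data->timeout_ns and data->timeout_clks for this card */
	mmc_set_data_timeout(data, card);
}
#endif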
/*
 * Allow claiming an already claimed host if the context is the same or there is
 * no context but the task is the same.
 */
static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx,
				   struct task_struct *task)
{
	return host->claimer == ctx ||
(!ctx && task && host->claimer->task == task);
}
static inline void mmc_ctx_set_claimer(struct mmc_host *host,
				       struct mmc_ctx *ctx,
				       struct task_struct *task)
{
	if (!host->claimer) {
		if (ctx)
			host->claimer = ctx;
		else
			host->claimer = &host->default_ctx;
	}
	if (task)
host->claimer->task = task;
}
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@ctx: context that claims the host or NULL in which case the default
 *	context will be used
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations. If @abort is non null and
 *	dereferences a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock. Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx,
		     atomic_t *abort)
{
	struct task_struct *task = ctx ? NULL : current;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
pm_runtime_get_sync(&card->dev);
__mmc_claim_host(card->host, ctx, NULL);
}
EXPORT_SYMBOL(mmc_get_card);
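
/*
 * Illustrative sketch (example only): mmc_get_card()/mmc_put_card() are meant
 * to bracket accesses to the card, keeping it runtime resumed and the host
 * claimed in between. foo_do_something() is hypothetical.
 */
#if 0	/* example only */
static int foo_access_card(struct mmc_card *card)
{
	int err;

	mmc_get_card(card, NULL);	/* runtime resume + claim host */
	err = foo_do_something(card);
	mmc_put_card(card, NULL);	/* release host + drop runtime PM reference */

	return err;
}
#endif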
/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx)
{
	struct mmc_host *host = card->host;
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
host->ios.chip_select = mode;
mmc_set_ios(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
WARN_ON(hz && hz < host->f_min);
if (hz > host->f_max)
hz = host->f_max;
host->ios.clock = hz;
mmc_set_ios(host);
}
int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;
if (host->cqe_on)
host->cqe_ops->cqe_off(host);
if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
opcode = MMC_SEND_TUNING_BLOCK;
	/* Only print error when we don't check for card removal */
	if (!host->detect_change) {
pr_err("%s: tuning execution failed: %d\n",
mmc_hostname(host), err);
mmc_debugfs_err_stats_inc(host, MMC_ERR_TUNING);
}
return err;
}
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
host->ios.bus_mode = mode;
mmc_set_ios(host);
}
/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
host->ios.bus_width = width;
mmc_set_ios(host);
}
/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	if (host->cqe_on)
host->cqe_ops->cqe_off(host);
	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
host->ops->hs400_enhanced_strobe)
host->ops->hs400_enhanced_strobe(host, &host->ios);
mmc_set_ios(host);
mmc_crypto_set_initial_state(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);
if (low_bits)
vdd -= 1;
/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
u32 mask = 0;
	if (vdd_max < vdd_min)
		return 0;
	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;
	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
mask |= 1 << vdd_max--;
return mask;
}
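
/*
 * Illustrative sketch (example only): per the kernel-doc above, a 3300-3400 mV
 * supply maps to the three OCR bits covering both boundary voltages.
 */
#if 0	/* example only */
static void foo_ocr_example(struct mmc_host *host)
{
	/* 3300..3400 mV -> MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35 */
	host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
}
#endif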
static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;
	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
}
return NULL;
}
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;
	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
dev_warn(mmc_dev(host), "card claims to support voltages below defined range\n");
ocr &= ~0x7F;
}
	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
}
if (!mmc_card_uhs2(host) && host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
bit = ffs(ocr) - 1;
ocr &= 3 << bit;
mmc_power_cycle(host, ocr);
} else {
		bit = fls(ocr) - 1;
		/*
		 * The bit variable represents the highest voltage bit set in
		 * the OCR register.
		 * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
		 * we must shift the mask '3' with (bit - 1).
		 */
		ocr &= 3 << (bit - 1);

		if (bit != host->ios.vdd)
dev_warn(mmc_dev(host), "exceeding card's volts\n");
}
return ocr;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
err = host->ops->start_signal_voltage_switch(host, &host->ios);
if (err)
host->ios.signal_voltage = old_signal_voltage;
return err;
}
void mmc_set_initial_signal_voltage(struct mmc_host *host)
{
	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (!mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120))
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
}
int mmc_host_set_uhs_voltage(struct mmc_host *host)
{
u32 clock;
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
clock = host->ios.clock;
host->ios.clock = 0;
mmc_set_ios(host);
	if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180))
		return -EAGAIN;
/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
mmc_delay(10);
host->ios.clock = clock;
mmc_set_ios(host);
return 0;
}
int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
{
	struct mmc_command cmd = {};
	int err = 0;

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
pr_warn("%s: cannot verify signal voltage switch\n",
mmc_hostname(host));
	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		goto power_cycle;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;
	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
}
	if (mmc_host_set_uhs_voltage(host)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
}
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
err = -EAGAIN;
power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, power cycling card\n",
			 mmc_hostname(host));
mmc_power_cycle(host, ocr);
}
int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;
	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
host_drv_type |= SD_DRIVER_TYPE_A;
if (host->caps & MMC_CAP_DRIVER_TYPE_C)
host_drv_type |= SD_DRIVER_TYPE_C;
if (host->caps & MMC_CAP_DRIVER_TYPE_D)
host_drv_type |= SD_DRIVER_TYPE_D;
	/*
	 * The drive strength that the hardware can support
	 * depends on the board design. Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
host_drv_type,
card_drv_type,
drv_type);
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;
mmc_pwrseq_pre_power_on(host);
host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;

	/* Set initial state and call mmc_set_ios */
mmc_set_initial_state(host);
mmc_set_initial_signal_voltage(host);
	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
mmc_delay(host->ios.power_delay_ms);
	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
mmc_delay(host->ios.power_delay_ms);
}
void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;
mmc_pwrseq_power_off(host);
host->ios.clock = 0;
host->ios.vdd = 0;
	host->ios.power_mode = MMC_POWER_OFF;

	/* Set initial state and call mmc_set_ios */
mmc_set_initial_state(host);
	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
mmc_delay(1);
}
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);

	/* Wait at least 1 ms according to SD spec */
mmc_delay(1);
mmc_power_up(host, ocr);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
host->bus_ops = ops;
}
/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
host->bus_ops = NULL;
}
void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
			bool cd_irq)
{
	/*
	 * Prevent system sleep for 5s to allow user space to consume the
	 * corresponding uevent. This is especially useful, when CD irq is used
	 * as a system wakeup, but doesn't hurt in other cases.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL))
__pm_wakeup_event(host->ws, 5000);
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
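
/*
 * Illustrative sketch (example only): a host driver with a card-detect GPIO
 * might debounce in its interrupt handler by passing a small delay to
 * mmc_detect_change(). The foo_* names and foo_host structure are
 * hypothetical.
 */
#if 0	/* example only */
static irqreturn_t foo_cd_irq(int irq, void *dev_id)
{
	struct foo_host *foo = dev_id;

	/* Let the core re-scan the slot after a 200 ms debounce delay */
	mmc_detect_change(foo->mmc, msecs_to_jiffies(200));
	return IRQ_HANDLED;
}
#endif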
/*
 * It is possible to erase an arbitrarily large area of an SD or MMC
 * card. That is not desirable because it can take a long time
 * (minutes) potentially delaying more important I/O, and also the
 * timeout calculations become increasingly hugely over-estimated.
 * Consequently, 'pref_erase' is defined as a guide to limit erases
 * to that size and alignment.
 *
 * For SD cards that define Allocation Unit size, limit erases to one
 * Allocation Unit at a time.
 * For MMC, have a stab at a good value and for modern cards it will
 * end up being 4MiB. Note that if the value is too small, it can end
 * up taking longer to erase. Also note, erase_size is already set to
 * High Capacity Erase Size if available when this function is called.
 */
	if (mmc_card_sd(card) && card->ssr.au) {
card->pref_erase = card->ssr.au;
card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->erase_size) {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
card->pref_erase += card->erase_size - sz;
}
} else
card->pref_erase = 0;
}
	/*
	 * ios.clock is only a target. The real clock rate might be
	 * less but not that much less, so fudge it by multiplying by 2.
	 */
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
(card->host->ios.clock / 1000);
erase_timeout = timeout_us / 1000;
	/*
	 * Theoretically, the calculation could underflow so round up
	 * to 1ms in that case.
	 */
	if (!erase_timeout)
erase_timeout = 1;
}
	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
erase_timeout *= card->ext_csd.sec_trim_mult;
}
erase_timeout *= qty;
	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
erase_timeout = 1000;
	/*
	 * For DISCARD none of the below calculation applies.
	 * The busy timeout is 250 ms per discard command.
	 */
	if (arg == SD_DISCARD_ARG)
		return SD_DISCARD_TIMEOUT_MS;
	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
erase_timeout = card->ssr.erase_timeout * qty +
card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
*/
erase_timeout = 250 * qty;
}
	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
erase_timeout = 1000;
	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2. For MMC, the
	 * erase group size is almost certainly also a power of 2, but it does
	 * not seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case. SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands. Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
qty += (mmc_sector_div(to, card->erase_size) -
mmc_sector_div(from, card->erase_size)) + 1;
if (!mmc_card_blockaddr(card)) {
from <<= 9;
to <<= 9;
}
	/*
	 * In case R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
	 * shall be avoided.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;
/* Let's poll to find out when the erase operation completes. */
err = mmc_poll_for_busy(card, busy_timeout, false, MMC_BUSY_ERASE);
	/*
	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
	 * to align the erase size efficiently.
	 */
	if (is_power_of_2(card->erase_size)) {
sector_t temp = from_new;
from_new = round_up(temp, card->erase_size);
rem = from_new - temp;
if (nr_new > rem)
			nr_new -= rem;
		else
			return 0;
nr_new = round_down(nr_new, card->erase_size);
} else {
		rem = mmc_sector_mod(from_new, card->erase_size);
		if (rem) {
rem = card->erase_size - rem;
			from_new += rem;
			if (nr_new > rem)
				nr_new -= rem;
			else
				return 0;
}
		rem = nr_new % card->erase_size;
		if (rem)
nr_new -= rem;
}
	if (nr_new == 0)
		return 0;
*to = from_new + nr_new;
*from = from_new;
return nr_new;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, sector_t from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem;
	sector_t to = from + nr;
	int err;

	if (!(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;
if (mmc_card_mmc(card) && is_trim_arg(arg) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (mmc_sector_mod(from, card->erase_size) ||
		    nr % card->erase_size)
			return -EINVAL;
}
if (arg == MMC_ERASE_ARG)
nr = mmc_align_erase_size(card, &from, &to, nr);
	if (nr == 0)
		return 0;

	if (to <= from)
		return -EINVAL;
/* 'from' and 'to' are inclusive */
to -= 1;
	/*
	 * Special case where only one erase-group fits in the timeout budget:
	 * If the region crosses an erase-group boundary in this particular
	 * case, we will be trimming more than one erase-group which does not
	 * fit in the timeout budget of the controller, so we need to split it
	 * and call mmc_do_erase() twice if necessary. This special case is
	 * identified by the card->eg_boundary flag.
	 */
	rem = card->erase_size - mmc_sector_mod(from, card->erase_size);
	if ((arg & MMC_TRIM_OR_DISCARD_ARGS) && card->eg_boundary && nr > rem) {
err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
}
return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
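
/*
 * Illustrative sketch (example only): an upper layer erasing a sector range.
 * As the kernel-doc above states, the caller must claim the host first; with
 * MMC_ERASE_ARG the range is aligned to erase groups by
 * mmc_align_erase_size(). foo_erase_range() is hypothetical.
 */
#if 0	/* example only */
static int foo_erase_range(struct mmc_card *card, sector_t from,
			   unsigned int nr)
{
	int err;

	mmc_claim_host(card->host);	/* caller must claim the host */
	err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
	mmc_release_host(card->host);

	return err;
}
#endif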
bool mmc_card_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	return (card->ext_csd.feature_support & MMC_DISCARD_FEATURE);
}
EXPORT_SYMBOL(mmc_card_can_discard);
bool mmc_card_can_sanitize(struct mmc_card *card)
{
	if (!mmc_card_can_trim(card) && !mmc_card_can_erase(card))
		return false;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return true;
	return false;
}
	/*
	 * We should not only use 'host->max_busy_timeout' as the limitation
	 * when deciding the max discard sectors. We should choose a balanced
	 * value that improves erase speed without incurring an excessively
	 * long timeout.
	 *
	 * Here we set 'card->pref_erase' as the minimal discard sectors no
	 * matter what size of 'host->max_busy_timeout', but if the
	 * 'host->max_busy_timeout' is large enough for more discard sectors,
	 * then we can continue to increase the max discard sectors until we
	 * get a balanced value. In cases when the 'host->max_busy_timeout'
	 * isn't specified, use the default max erase timeout.
	 */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
timeout = mmc_erase_timeout(card, arg, qty + x);
			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
last_timeout = timeout;
y = x;
}
qty += y;
} while (y);
	if (!qty)
		return 0;
	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to care that erase-group boundaries are not crossed by a
	 * single trim operation. We flag that special case with "eg_boundary".
	 * In all other cases we can just decrement qty and pretend that we
	 * always touch (qty + 1) erase-groups as a simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
qty--;
	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->card_hw_reset)
		return;
host->ops->card_hw_reset(host);
}
/**
 * mmc_hw_reset - reset the card in hardware
 * @card: card to be reset
 *
 * Hard reset the card. This function is only for upper layers, like the
 * block layer or card drivers. You cannot use it in host drivers (struct
 * mmc_card might be gone then).
 *
 * Return: 0 on success, -errno on failure
 */
int mmc_hw_reset(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	ret = host->bus_ops->hw_reset(host);
	if (ret < 0)
pr_warn("%s: tried to HW reset card, got error %d\n",
mmc_hostname(host), ret);
return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);
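
/*
 * Illustrative sketch (example only): an upper layer (e.g. a card driver)
 * recovering a wedged card with mmc_hw_reset(), as the kernel-doc above
 * permits. Host drivers must not do this. foo_recover() is hypothetical.
 */
#if 0	/* example only */
static void foo_recover(struct mmc_card *card)
{
	if (mmc_hw_reset(card))
		pr_warn("%s: recovery reset failed\n",
			mmc_hostname(card->host));
}
#endif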
int mmc_sw_reset(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	if (!host->bus_ops->sw_reset)
		return -EOPNOTSUPP;

	ret = host->bus_ops->sw_reset(host);
	if (ret)
pr_warn("%s: tried to SW reset card, got error %d\n",
mmc_hostname(host), ret);
pr_debug("%s: %s: trying to init card at %u Hz\n",
mmc_hostname(host), __func__, host->f_init);
mmc_power_up(host, host->ocr_avail);
	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
mmc_hw_reset_for_init(host);
	/*
	 * sdio_reset sends CMD52 to reset card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 * Skip it if we already know that we do not support SDIO commands.
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
sdio_reset(host);
mmc_go_idle(host);
	if (!(host->caps2 & MMC_CAP2_NO_SD)) {
		if (mmc_send_if_cond_pcie(host, host->ocr_avail))
			goto out;
		if (mmc_card_sd_express(host))
			return 0;
}
	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;
out:
	mmc_power_off(host);
	return -EIO;
}
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;
ret = host->bus_ops->alive(host);
	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
mmc_detect_change(host, msecs_to_jiffies(200));
pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
}
if (ret) {
mmc_card_set_removed(host->card);
pr_debug("%s: card remove detected\n", mmc_hostname(host));
}
return ret;
}
int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;
	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
*/
cancel_delayed_work(&host->detect);
_mmc_detect_change(host, 0, false);
}
}
int mmc_card_alternative_gpt_sector(struct mmc_card *card, sector_t *gpt_sector)
{
	unsigned int boot_sectors_num;

	if ((!(card->host->caps2 & MMC_CAP2_ALT_GPT_TEGRA)))
		return -EOPNOTSUPP;

	/* filter out unrelated cards */
	if (card->ext_csd.rev < 3 ||
!mmc_card_mmc(card) ||
!mmc_card_is_blockaddr(card) ||
	    mmc_card_is_removable(card->host))
		return -ENOENT;
	/*
	 * eMMC storage has two special boot partitions in addition to the
	 * main one. NVIDIA's bootloader linearizes eMMC boot0->boot1->main
	 * accesses, this means that the partition table addresses are shifted
	 * by the size of boot partitions. In accordance with the eMMC
	 * specification, the boot partition size is calculated as follows:
	 *
	 *	boot partition size = 128K byte x BOOT_SIZE_MULT
	 *
	 * Calculate the number of sectors occupied by both boot partitions.
	 */
boot_sectors_num = card->ext_csd.raw_boot_mult * SZ_128K /
SZ_512 * MMC_NUM_BOOT_PARTITION;
/* Defined by NVIDIA and used by Android devices. */
*gpt_sector = card->ext_csd.sectors - boot_sectors_num - 1;
	/* If there is a non-removable card registered, only scan once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
host->rescan_entered = 1;
	/* If an SD express card is present, then leave it as is. */
	if (mmc_card_sd_express(host)) {
		mmc_release_host(host);
		goto out;
}
	/*
	 * Ideally we should favor initialization of legacy SD cards and defer
	 * UHS-II enumeration. However, it seems like cards don't reliably
	 * announce their support for UHS-II in the response to the ACMD41,
	 * while initializing the legacy SD interface. Therefore, let's start
	 * with UHS-II for now.
	 */
	if (!mmc_attach_sd_uhs2(host)) {
		mmc_release_host(host);
		goto out;
}
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		unsigned int freq = freqs[i];

		if (freq > host->f_max) {
			if (i + 1 < ARRAY_SIZE(freqs))
				continue;
			freq = host->f_max;
		}

		if (!mmc_rescan_try_freq(host, max(freq, host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
}
	/* A non-removable card should have been detected by now. */
	if (!mmc_card_is_removable(host) && !host->bus_ops)
pr_info("%s: Failed to initialize a non-removable card",
mmc_hostname(host));
	/*
	 * Ignore the command timeout errors observed during
	 * the card init as those are expected.
	 */
host->err_stats[MMC_ERR_CMD_TIMEOUT] = 0;
mmc_release_host(host);
 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
/* clear pm flags now and let card drivers set them as needed */
host->pm_flags = 0;
	if (host->bus_ops) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
		mmc_release_host(host);
		return;
}