// SPDX-License-Identifier: GPL-2.0-only /* * linux/drivers/mmc/core/mmc.c * * Copyright (C) 2003-2004 Russell King, All Rights Reserved. * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved. * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
*/
/*
 * NOTE(review): extraction-mangled fragment of mmc_decode_cid() from the
 * Linux MMC core. "staticint" should read "static int" and the original
 * multi-line comments have been collapsed onto single lines. The switch on
 * card->csd.mmca_vsn is truncated: only the MMC v1.0-v1.4 CID layout is
 * visible; the v2.0+ cases, the default (error) case, and the function's
 * closing brace are missing. Restore from the upstream file rather than
 * patching this fragment in place.
 */
/* * Given the decoded CSD structure, decode the raw CID to our CID structure.
 */ staticint mmc_decode_cid(struct mmc_card *card)
{
u32 *resp = card->raw_cid;
/* * Add the raw card ID (cid) data to the entropy pool. It doesn't * matter that not all of it is unique, it's just bonus entropy.
 */
add_device_randomness(&card->raw_cid, sizeof(card->raw_cid));
/* * The selection of the format here is based upon published * specs from SanDisk and from what people have reported.
 */ switch (card->csd.mmca_vsn) { case 0: /* MMC v1.0 - v1.2 */ case 1: /* MMC v1.4 */
card->cid.manfid = unstuff_bits(resp, 104, 24);
card->cid.prod_name[0] = unstuff_bits(resp, 96, 8);
card->cid.prod_name[1] = unstuff_bits(resp, 88, 8);
card->cid.prod_name[2] = unstuff_bits(resp, 80, 8);
card->cid.prod_name[3] = unstuff_bits(resp, 72, 8);
card->cid.prod_name[4] = unstuff_bits(resp, 64, 8);
card->cid.prod_name[5] = unstuff_bits(resp, 56, 8);
card->cid.prod_name[6] = unstuff_bits(resp, 48, 8);
card->cid.hwrev = unstuff_bits(resp, 44, 4);
card->cid.fwrev = unstuff_bits(resp, 40, 4);
card->cid.serial = unstuff_bits(resp, 16, 24);
card->cid.month = unstuff_bits(resp, 12, 4);
/* CID year field is offset from 1997 for the MMC v1.x layout */
card->cid.year = unstuff_bits(resp, 8, 4) + 1997; break;
/*
 * NOTE(review): mangled splice. This span opens mmc_decode_csd(), but from
 * the "Disable these attributes" point onward the text belongs to
 * mmc_decode_ext_csd(): it indexes an ext_csd[] byte buffer and uses locals
 * (hc_erase_grp_sz, hc_wp_grp_sz) that are never declared in this fragment,
 * while the actual CSD field extraction (the e/m/a/b decode) is missing.
 * The brace structure does not balance; do not edit in place — restore both
 * functions from the upstream file.
 */
/* * Given a 128-bit response, decode to our card CSD structure.
 */ staticint mmc_decode_csd(struct mmc_card *card)
{ struct mmc_csd *csd = &card->csd; unsignedint e, m, a, b;
u32 *resp = card->raw_csd;
/* * We only understand CSD structure v1.1 and v1.2. * v1.2 has extra information in bits 15, 11 and 10. * We also support eMMC v4.4 & v4.41.
 */
csd->structure = unstuff_bits(resp, 126, 2); if (csd->structure == 0) {
pr_err("%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd->structure); return -EINVAL;
}
/* * Disable these attributes by default
 */
card->ext_csd.enhanced_area_offset = -EINVAL;
card->ext_csd.enhanced_area_size = -EINVAL;
/* * Enhanced area feature support -- check whether the eMMC * card has the Enhanced area enabled. If so, export enhanced * area offset and size to user by adding sysfs interface.
 */ if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
(ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { if (card->ext_csd.partition_setting_completed) {
hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
hc_wp_grp_sz =
ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* * calculate the enhanced data area offset, in bytes
 */
card->ext_csd.enhanced_area_offset =
(((unsignedlonglong)ext_csd[139]) << 24) +
(((unsignedlonglong)ext_csd[138]) << 16) +
(((unsignedlonglong)ext_csd[137]) << 8) +
(((unsignedlonglong)ext_csd[136])); if (mmc_card_blockaddr(card))
card->ext_csd.enhanced_area_offset <<= 9; /* * calculate the enhanced data area size, in kilobytes
 */
card->ext_csd.enhanced_area_size =
(ext_csd[142] << 16) + (ext_csd[141] << 8) +
ext_csd[140];
card->ext_csd.enhanced_area_size *=
(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
card->ext_csd.enhanced_area_size <<= 9;
} else {
pr_warn("%s: defines enhanced area without partition setting complete\n",
mmc_hostname(card->host));
}
}
}
/*
 * NOTE(review): continuation of the mmc_decode_ext_csd() splice — general
 * purpose partition, boot partition and RPMB region handling. The locals
 * hc_erase_grp_sz/hc_wp_grp_sz/part_size/idx and the ext_csd[] buffer are
 * undeclared in this fragment, and the general-purpose-partition size loop
 * that should follow the EXT_CSD_PART_SUPPORT_PART_EN test is missing: the
 * text jumps straight to EXT_CSD_REV handling. Braces do not balance.
 */
/* * General purpose partition feature support -- * If ext_csd has the size of general purpose partitions, * set size, part_cfg, partition name in mmc_part.
 */ if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
EXT_CSD_PART_SUPPORT_PART_EN) {
hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
hc_wp_grp_sz =
ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
/* * The EXT_CSD format is meant to be forward compatible. As long * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV * are authorized, see JEDEC JESD84-B50 section B.8.
 */
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
/* fixup device after ext_csd revision field is updated */
mmc_fixup_device(card, mmc_ext_csd_fixups);
/* * There are two boot regions of equal size, defined in * multiples of 128K.
 */ if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_host_can_access_boot(card->host)) { for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
mmc_part_add(card, part_size,
EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx, "boot%d", idx, true,
MMC_BLK_DATA_AREA_BOOT);
}
}
}
/* * Note that the call to mmc_part_add above defaults to read * only. If this default assumption is changed, the call must * take into account the value of boot_locked below.
 */
card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
card->ext_csd.boot_ro_lockable = true;
/* * RPMB regions are defined in multiples of 128K.
 */
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT]; if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_can_cmd23(card->host)) {
mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
EXT_CSD_PART_CONFIG_ACC_RPMB, "rpmb", 0, false,
MMC_BLK_DATA_AREA_RPMB);
}
}
/*
 * NOTE(review): further mmc_decode_ext_csd() fragment — partition-switch
 * timeout fallback and eMMC v5 firmware-version/FFU capability parsing.
 * The eMMC v5 block opened at the "rev >= 7" test is never closed here;
 * the fragment is truncated mid-body.
 */
/* * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined * when accessing a specific field", so use it here if there is no * PARTITION_SWITCH_TIME.
 */ if (!card->ext_csd.part_time)
card->ext_csd.part_time = card->ext_csd.generic_cmd6_time; /* Some eMMC set the value too low so set a minimum */ if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
/* eMMC v5 or later */ if (card->ext_csd.rev >= 7) {
memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
MMC_FIRMWARE_LEN);
card->ext_csd.ffu_capable =
(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
/*
 * NOTE(review): truncated opening of mmc_read_ext_csd(). Only the
 * error-tolerant path for a failed EXT_CSD fetch is visible (treat
 * -EINVAL/-ENOSYS/-EFAULT as "card has no EXT_CSD" and continue, with a
 * special warning for apparent high-capacity cards). The success path that
 * calls mmc_decode_ext_csd() and frees the buffer is missing, and the
 * function never closes. "staticint" is a fused "static int".
 */
staticint mmc_read_ext_csd(struct mmc_card *card)
{
u8 *ext_csd; int err;
if (!mmc_card_can_ext_csd(card)) return 0;
err = mmc_get_ext_csd(card, &ext_csd); if (err) { /* If the host or the card can't do the switch,
 * fail more gracefully. */ if ((err != -EINVAL)
&& (err != -ENOSYS)
&& (err != -EFAULT)) return err;
/* * High capacity cards should have this "magic" size * stored in their CSD.
 */ if (card->csd.capacity == (4096 * 512)) {
pr_err("%s: unable to read EXT_CSD on a possible high capacity card. Card will be ignored.\n",
mmc_hostname(card->host));
} else {
pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
mmc_hostname(card->host));
err = 0;
}
/*
 * NOTE(review): __mmc_select_powerclass() fragment, spliced. The voltage/
 * clock switch and the conditional EXT_CSD_POWER_CLASS mmc_switch() are
 * from this function, but the tail (the call to __mmc_select_powerclass()
 * with an undeclared ext_csd_bits, and the warning referencing undeclared
 * bus_width/ddr) belongs to the mmc_select_powerclass() wrapper that
 * normally follows it. Fused tokens: "staticint", "unsignedint", "elseif".
 */
/* * Select the PowerClass for the current bus width * If power class is defined for 4/8 bit bus in the * extended CSD register, select it by executing the * mmc_switch command.
 */ staticint __mmc_select_powerclass(struct mmc_card *card, unsignedint bus_width)
{ struct mmc_host *host = card->host; struct mmc_ext_csd *ext_csd = &card->ext_csd; unsignedint pwrclass_val = 0; int err = 0;
switch (1 << host->ios.vdd) { case MMC_VDD_165_195: if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
pwrclass_val = ext_csd->raw_pwr_cl_26_195; elseif (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
ext_csd->raw_pwr_cl_52_195 :
ext_csd->raw_pwr_cl_ddr_52_195; elseif (host->ios.clock <= MMC_HS200_MAX_DTR)
pwrclass_val = ext_csd->raw_pwr_cl_200_195; break; case MMC_VDD_27_28: case MMC_VDD_28_29: case MMC_VDD_29_30: case MMC_VDD_30_31: case MMC_VDD_31_32: case MMC_VDD_32_33: case MMC_VDD_33_34: case MMC_VDD_34_35: case MMC_VDD_35_36: if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
pwrclass_val = ext_csd->raw_pwr_cl_26_360; elseif (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
ext_csd->raw_pwr_cl_52_360 :
ext_csd->raw_pwr_cl_ddr_52_360; elseif (host->ios.clock <= MMC_HS200_MAX_DTR)
pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
ext_csd->raw_pwr_cl_ddr_200_360 :
ext_csd->raw_pwr_cl_200_360; break; default:
pr_warn("%s: Voltage range not supported for power class\n",
mmc_hostname(host)); return -EINVAL;
}
/* If the power class is different from the default value */ if (pwrclass_val > 0) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_CLASS,
pwrclass_val,
card->ext_csd.generic_cmd6_time);
}
err = __mmc_select_powerclass(card, ext_csd_bits); if (err)
pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
mmc_hostname(host), 1 << bus_width, ddr);
return err;
}
/*
 * NOTE(review): only the opening of mmc_set_bus_speed() survives — the
 * body that clamps max_dtr per the selected timing and calls
 * mmc_set_clock() is missing; the next line starts a different function.
 */
/* * Set the bus speed for the selected speed mode.
 */ staticvoid mmc_set_bus_speed(struct mmc_card *card)
{ unsignedint max_dtr = (unsignedint)-1;
/*
 * Select the bus width among 4-bit and 8-bit(SDR).
 * If the bus width is changed successfully, return the selected width value.
 * Zero is returned instead of error value if the wide width is not supported.
 */
static int mmc_select_bus_width(struct mmc_card *card)
{
	static unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
		EXT_CSD_BUS_WIDTH_1,
	};
	static unsigned bus_widths[] = {
		MMC_BUS_WIDTH_8,
		MMC_BUS_WIDTH_4,
		MMC_BUS_WIDTH_1,
	};
	struct mmc_host *host = card->host;
	unsigned idx, bus_width = 0;
	int err = 0;

	/* Nothing to do unless the card has EXT_CSD and the host is wide-capable */
	if (!mmc_card_can_ext_csd(card) ||
	    !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
		return 0;

	/* Start at 8-bit if the host supports it, otherwise at 4-bit */
	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		/*
		 * NOTE(review): these two lines were dropped by the garbled
		 * extraction; without them bus_width stays 0 and the host
		 * side is never switched to the new width. Restored per
		 * upstream behavior.
		 */
		bus_width = bus_widths[idx];
		mmc_set_bus_width(host, bus_width);

		/*
		 * If controller can't handle bus width test,
		 * compare ext_csd previously read in 1 bit mode
		 * against ext_csd at new bus width
		 */
		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_width);
		else
			err = mmc_bus_test(card, bus_width);

		if (!err) {
			/* Report the width that stuck */
			err = bus_width;
			break;
		} else {
			pr_warn("%s: switch to bus width %d failed\n",
				mmc_hostname(host), 1 << bus_width);
		}
	}

	return err;
}
/*
 * NOTE(review): heavily spliced span. It opens mmc_select_hs() but the
 * body mixes fragments of several distinct functions: the DDR52 bus-width
 * switch and vccq selection (mmc_select_hs_ddr), the HS200->HS400 upgrade
 * sequence (mmc_select_hs400), HS200 signal-voltage selection
 * (mmc_select_hs200), and the enhanced-strobe HS400es sequence
 * (mmc_select_hs400es). Locals host/val/bus_width and the out_err label
 * are undeclared/undefined here, and braces do not balance. None of this
 * is safe to rewrite in place; recover each function from upstream.
 */
/* * Switch to the high-speed mode
 */ staticint mmc_select_hs(struct mmc_card *card)
{ int err;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits,
card->ext_csd.generic_cmd6_time,
MMC_TIMING_MMC_DDR52, true, true, MMC_CMD_RETRIES); if (err) {
pr_err("%s: switch to bus width %d ddr failed\n",
mmc_hostname(host), 1 << bus_width); return err;
}
/* * eMMC cards can support 3.3V to 1.2V i/o (vccq) * signaling. * * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. * * 1.8V vccq at 3.3V core voltage (vcc) is not required * in the JEDEC spec for DDR. * * Even (e)MMC card can support 3.3v to 1.2v vccq, but not all * host controller can support this, like some of the SDHCI * controller which connect to an eMMC device. Some of these * host controller still needs to use 1.8v vccq for supporting * DDR mode. * * So the sequence will be: * if (host and device can both support 1.2v IO) * use 1.2v IO; * else if (host and device can both support 1.8v IO) * use 1.8v IO; * so if host and device can only support 3.3v IO, this is the * last choice. * * WARNING: eMMC rules are NOT the same as SD DDR
 */ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); if (!err) return 0;
}
err = mmc_switch_status(card, true); if (err) goto out_err;
/* Switch HS to HS200 */
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
val, card->ext_csd.generic_cmd6_time, 0, false, true, MMC_CMD_RETRIES); if (err) goto out_err;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
/* * For HS200, CRC errors are not a reliable way to know the switch * failed. If there really is a problem, we would expect tuning will * fail and the result ends up the same.
 */
err = mmc_switch_status(card, false); if (err) goto out_err;
mmc_set_bus_speed(card);
/* Prepare tuning for HS400 mode. */ if (host->ops->prepare_hs400_tuning)
host->ops->prepare_hs400_tuning(host, &host->ios);
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If fails try again during next card power cycle */ if (err) goto out_err;
err = mmc_select_bus_width(card); if (err != MMC_BUS_WIDTH_8) {
pr_err("%s: switch to 8bit bus width failed, err:%d\n",
mmc_hostname(host), err);
err = err < 0 ? err : -ENOTSUPP; goto out_err;
}
/* Switch card to HS mode */
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
card->ext_csd.generic_cmd6_time, 0, false, true, MMC_CMD_RETRIES); if (err) {
pr_err("%s: switch to hs for hs400es failed, err:%d\n",
mmc_hostname(host), err); goto out_err;
}
/* * Bump to HS timing and frequency. Some cards don't handle * SEND_STATUS reliably at the initial frequency.
 */
mmc_set_timing(host, MMC_TIMING_MMC_HS);
mmc_set_bus_speed(card);
err = mmc_switch_status(card, true); if (err) goto out_err;
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
val,
card->ext_csd.generic_cmd6_time); if (err) {
pr_err("%s: switch to bus width for hs400es failed, err:%d\n",
mmc_hostname(host), err); goto out_err;
}
mmc_select_driver_type(card);
/* Switch card to HS400 */
val = EXT_CSD_TIMING_HS400 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0, false, true, MMC_CMD_RETRIES); if (err) {
pr_err("%s: switch to hs400es failed, err:%d\n",
mmc_hostname(host), err); goto out_err;
}
/* Set host controller to HS400 timing and frequency */
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
/* Controller enable enhanced strobe function */
host->ios.enhanced_strobe = true; if (host->ops->hs400_enhanced_strobe)
host->ops->hs400_enhanced_strobe(host, &host->ios);
err = mmc_switch_status(card, true); if (err) goto out_err;
/*
 * NOTE(review): mmc_select_hs200() fragment spliced with the tail of
 * mmc_select_timing(). The signal-voltage / bus-width / HS200 switch and
 * the -EBADMSG clock/timing rollback belong to mmc_select_hs200(); the
 * "err:" recovery label is cut short, and the HS200/HS fallback chain with
 * the out:/bus_speed: labels belongs to mmc_select_timing(). Note err is
 * initialised to -EINVAL and tested before any voltage call visibly sets
 * it — the intervening HS200_1_2V attempt was dropped by extraction.
 */
/* * For device supporting HS200 mode, the following sequence * should be done before executing the tuning process. * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported) * 2. switch to HS200 mode * 3. set the clock to > 52Mhz and <=200MHz
 */ staticint mmc_select_hs200(struct mmc_card *card)
{ struct mmc_host *host = card->host; unsignedint old_timing, old_signal_voltage, old_clock; int err = -EINVAL;
u8 val;
if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
/* If fails try again during next card power cycle */ if (err) return err;
mmc_select_driver_type(card);
/* * Set the bus width(4 or 8) with host's support and * switch to HS200 mode if bus width is set successfully.
 */
err = mmc_select_bus_width(card); if (err > 0) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, val,
card->ext_csd.generic_cmd6_time, 0, false, true, MMC_CMD_RETRIES); if (err) goto err;
/* * Bump to HS timing and frequency. Some cards don't handle * SEND_STATUS reliably at the initial frequency. * NB: We can't move to full (HS200) speeds until after we've * successfully switched over.
 */
old_timing = host->ios.timing;
old_clock = host->ios.clock;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
/* * For HS200, CRC errors are not a reliable way to know the * switch failed. If there really is a problem, we would expect * tuning will fail and the result ends up the same.
 */
err = mmc_switch_status(card, false);
/* * mmc_select_timing() assumes timing has not changed if * it is a switch error.
 */ if (err == -EBADMSG) {
mmc_set_clock(host, old_clock);
mmc_set_timing(host, old_timing);
}
}
err: if (err) { /* fall back to the old signal voltage, if fails report error */ if (mmc_set_signal_voltage(host, old_signal_voltage))
err = -EIO;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) {
err = mmc_select_hs200(card); if (err == -EBADMSG)
card->mmc_avail_type &= ~EXT_CSD_CARD_TYPE_HS200; else goto out;
}
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
err = mmc_select_hs(card);
out: if (err && err != -EBADMSG) return err;
bus_speed: /* * Set the bus speed to the selected bus timing. * If timing is not selected, backward compatible is the default.
 */
mmc_set_bus_speed(card); return 0;
}
/*
 * Execute tuning sequence to seek the proper bus operating
 * conditions for HS200 and HS400, which sends CMD21 to the device.
 */
static int mmc_hs200_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	/*
	 * Timing should be adjusted to the HS400 target
	 * operation frequency for tuning process
	 */
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	    host->ios.bus_width == MMC_BUS_WIDTH_8)
		if (host->ops->prepare_hs400_tuning)
			host->ops->prepare_hs400_tuning(host, &host->ios);

	return mmc_execute_tuning(card);
}
/*
 * NOTE(review): first part of mmc_init_card() — bus mode, idle, OCR, SPI
 * CRC, CID fetch/compare, CSD/CID decode, DSR, card select, EXT_CSD read
 * and sector-addressing setup. Extraction mangling: "staticint" fused, the
 * mmc_alloc_card()/card initialisation between the oldcard CID compare and
 * host->ops->init_card is missing (the oldcard if-block never closes, and
 * `card` is used before any visible assignment). Do not edit in place.
 */
/* * Handle the detection and initialisation of a card. * * In the case of a resume, "oldcard" will contain the card * we're trying to reinitialise.
 */ staticint mmc_init_card(struct mmc_host *host, u32 ocr, struct mmc_card *oldcard)
{ struct mmc_card *card; int err;
u32 cid[4];
u32 rocr;
WARN_ON(!host->claimed);
/* Set correct bus mode for MMC before attempting init */ if (!mmc_host_is_spi(host))
mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
/* * Since we're changing the OCR value, we seem to * need to tell some cards to go back to the idle * state. We wait 1ms to give cards time to * respond. * mmc_go_idle is needed for eMMC that are asleep
 */
mmc_go_idle(host);
/* The extra bit indicates that we support high capacity */
err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr); if (err) goto err;
/* * For SPI, enable CRC as appropriate.
 */ if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc); if (err) goto err;
}
/* * Fetch CID from card.
 */
err = mmc_send_cid(host, cid); if (err) goto err;
if (oldcard) { if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
pr_debug("%s: Perhaps the card was replaced\n",
mmc_hostname(host));
err = -ENOENT; goto err;
}
/* * Call the optional HC's init_card function to handle quirks.
 */ if (host->ops->init_card)
host->ops->init_card(host, card);
/* * For native busses: set card RCA and quit open drain mode.
 */ if (!mmc_host_is_spi(host)) {
err = mmc_set_relative_addr(card); if (err) goto free_card;
mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
if (!oldcard) { /* * Fetch CSD from card.
 */
err = mmc_send_csd(card, card->raw_csd); if (err) goto free_card;
err = mmc_decode_csd(card); if (err) goto free_card;
err = mmc_decode_cid(card); if (err) goto free_card;
}
/* * handling only for cards supporting DSR and hosts requesting * DSR configuration
 */ if (card->csd.dsr_imp && host->dsr_req)
mmc_set_dsr(host);
/* * Select card, as all following commands rely on that.
 */ if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card); if (err) goto free_card;
}
if (!oldcard) { /* Read extended CSD. */
err = mmc_read_ext_csd(card); if (err) goto free_card;
/* * If doing byte addressing, check if required to do sector * addressing. Handle the case of <2GB cards needing sector * addressing. See section 8.1 JEDEC Standard JED84-A441; * ocr register has bit 30 set for sector addressing.
 */ if (rocr & BIT(30))
mmc_card_set_blockaddr(card);
/* Erase size depends on CSD and Extended CSD */
mmc_set_erase_size(card);
}
/*
 * NOTE(review): mmc_init_card() continued — card-type reselection,
 * ERASE_GROUP_DEF enabling (with -EBADMSG tolerated and enhanced-area
 * attributes invalidated on soft failure), WP group size, default user
 * partition selection, power-off notification, and erase_arg choice.
 * "elseif" is a fused "else if". This span appears internally coherent
 * but depends on labels (free_card) defined outside the fragment.
 */
/* * Reselect the card type since host caps could have been changed when * debugging even if the card is not new.
 */
mmc_select_card_type(card);
/* Enable ERASE_GRP_DEF. This bit is lost after a reset or power off. */ if (card->ext_csd.rev >= 3) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_ERASE_GROUP_DEF, 1,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG) goto free_card;
if (err) { /* * Just disable enhanced area off & sz * will try to enable ERASE_GROUP_DEF * during next time reinit
 */
card->ext_csd.enhanced_area_offset = -EINVAL;
card->ext_csd.enhanced_area_size = -EINVAL;
} else {
card->ext_csd.erase_group_def = 1; /* * enable ERASE_GRP_DEF successfully. * This will affect the erase size, so * here need to reset erase size
 */
mmc_set_erase_size(card);
}
}
mmc_set_wp_grp_size(card); /* * Ensure eMMC user default partition is enabled
 */ if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
card->ext_csd.part_config,
card->ext_csd.part_time); if (err && err != -EBADMSG) goto free_card;
}
/* * Enable power_off_notification byte in the ext_csd register
 */ if (card->ext_csd.rev >= 6) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time); if (err && err != -EBADMSG) goto free_card;
/* * The err can be -EBADMSG or 0, * so check for success and update the flag
 */ if (!err)
card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
}
/* set erase_arg */ if (mmc_card_can_discard(card))
card->erase_arg = MMC_DISCARD_ARG; elseif (mmc_card_can_trim(card))
card->erase_arg = MMC_TRIM_ARG; else
card->erase_arg = MMC_ERASE_ARG;
/*
 * NOTE(review): mmc_init_card() tail, spliced with mmc_sleep(). The
 * cache-enable block declares timeout_ms but the mmc_switch() call that
 * sets err is missing; the CMDQ-enable section is intact; after
 * cqe_enable the text jumps into mmc_sleep() (undeclared cmd,
 * use_r1b_resp, timeout_ms, out_release label). Braces do not balance.
 */
/* * If cache size is higher than 0, this indicates the existence of cache * and it can be turned on. Note that some eMMCs from Micron has been * reported to need ~800 ms timeout, while enabling the cache after * sudden power failure tests. Let's extend the timeout to a minimum of * DEFAULT_CACHE_EN_TIMEOUT_MS and do it for all cards.
 */ if (card->ext_csd.cache_size > 0) { unsignedint timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
/* * Only if no error, cache is turned on successfully.
 */ if (err) {
pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
mmc_hostname(card->host), err);
card->ext_csd.cache_ctrl = 0;
} else {
card->ext_csd.cache_ctrl = 1;
}
}
/* * Enable Command Queue if supported. Note that Packed Commands cannot * be used with Command Queue.
 */
card->ext_csd.cmdq_en = false; if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
err = mmc_cmdq_enable(card); if (err && err != -EBADMSG) goto free_card; if (err) {
pr_warn("%s: Enabling CMDQ failed\n",
mmc_hostname(card->host));
card->ext_csd.cmdq_support = false;
card->ext_csd.cmdq_depth = 0;
}
} /* * In some cases (e.g. RPMB or mmc_test), the Command Queue must be * disabled for a time, so a flag is needed to indicate to re-enable the * Command Queue.
 */
card->reenable_cmdq = card->ext_csd.cmdq_en;
if (host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card); if (!err) {
host->cqe_enabled = true;
err = mmc_wait_for_cmd(host, &cmd, 0); if (err) goto out_release;
/* * If the host does not wait while the card signals busy, then we can * try to poll, but only if the host supports HW polling, as the * SEND_STATUS cmd is not allowed. If we can't poll, then we simply need * to wait the sleep/awake timeout.
 */ if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp) goto out_release;
if (!host->ops->card_busy) {
mmc_delay(timeout_ms); goto out_release;
}
/*
 * NOTE(review): mmc_remove() fragment spliced with shutdown/suspend code.
 * The get_device()/mmc_remove_card() calls belong here, but a void
 * function cannot `return err`, and the _mmc_suspend()/pm_runtime calls
 * (with an undeclared err) are from a different function. Recover both
 * routines from upstream.
 */
/* * Host is being removed. Free up the current card and do a graceful power-off.
 */ staticvoid mmc_remove(struct mmc_host *host)
{
get_device(&host->card->dev);
mmc_remove_card(host->card);
err = _mmc_suspend(host, MMC_POWEROFF_SUSPEND); if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
}
return err;
}
/*
 * NOTE(review): _mmc_resume() opening spliced with fragments of
 * _mmc_shutdown() (the poweroff-notify re-init and MMC_POWEROFF_SHUTDOWN
 * suspend) and _mmc_hw_reset() (cache flush, RST_n vs power-cycle reset,
 * final mmc_init_card). `card` is undeclared in this fragment and the
 * visible body even calls _mmc_resume() recursively — clear evidence of
 * splicing. Recover the three functions from upstream.
 */
/* * This function tries to determine if the same card is still present * and, if so, restore all state to it.
 */ staticint _mmc_resume(struct mmc_host *host)
{ int err = 0;
/* * If the card remains suspended at this point and it was done by using * the sleep-cmd (CMD5), we may need to re-initialize it first, to allow * us to send the preferred poweroff-notification cmd at shutdown.
 */ if (mmc_card_can_poweroff_notify(host->card) &&
!mmc_host_can_poweroff_notify(host, MMC_POWEROFF_SUSPEND))
err = _mmc_resume(host);
if (!err)
err = _mmc_suspend(host, MMC_POWEROFF_SHUTDOWN);
/* * In the case of recovery, we can't expect flushing the cache to work * always, but we have a go and ignore errors.
 */
_mmc_flush_cache(host);
if ((host->caps & MMC_CAP_HW_RESET) && host->ops->card_hw_reset &&
mmc_card_can_reset(card)) { /* If the card accept RST_n signal, send it. */
mmc_set_clock(host, host->f_init);
host->ops->card_hw_reset(host); /* Set initial state and call mmc_set_ios */
mmc_set_initial_state(host);
} else { /* Do a brute force power cycle */
mmc_power_cycle(host, card->ocr);
mmc_pwrseq_reset(host);
} return mmc_init_card(host, card->ocr, card);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.