// SPDX-License-Identifier: GPL-2.0 /* * Block driver for media (i.e., flash cards) * * Copyright 2002 Hewlett-Packard Company * Copyright 2005-2008 Pierre Ossman * * Use consistent with the GNU GPL is permitted, * provided that this copyright notice is * preserved in its entirety in all copies and derived works. * * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS * FITNESS FOR ANY PARTICULAR PURPOSE. * * Many thanks to Alessandro Rubini and Jonathan Corbet! * * Author: Andrew Christian * 28 May 2002
*/ #include <linux/moduleparam.h> #include <linux/module.h> #include <linux/init.h>
/* * Set a 10 second timeout for polling write request busy state. Note, mmc core * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10 * second software timer to timeout the whole request, so 10 seconds should be * ample.
*/ #define MMC_BLK_TIMEOUT_MS (10 * 1000) #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
/**
 * struct rpmb_frame - rpmb frame as defined by eMMC 5.1 (JESD84-B51)
 *
 * @stuff        : stuff bytes
 * @key_mac      : The authentication key or the message authentication
 *                 code (MAC) depending on the request/response type.
 *                 The MAC will be delivered in the last (or the only)
 *                 block of data.
 * @data         : Data to be written or read by signed access.
 * @nonce        : Random number generated by the host for the requests
 *                 and copied to the response by the RPMB engine.
 * @write_counter: Counter value for the total amount of the successful
 *                 authenticated data write requests made by the host.
 * @addr         : Address of the data to be programmed to or read
 *                 from the RPMB. Address is the serial number of
 *                 the accessed block (half sector 256B).
 * @block_count  : Number of blocks (half sectors, 256B) requested to be
 *                 read/programmed.
 * @result       : Includes information about the status of the write counter
 *                 (valid, expired) and result of the access made to the RPMB.
 * @req_resp     : Defines the type of request and response to/from the memory.
 *
 * The stuff bytes and big-endian properties are modeled to fit to the spec.
 * NOTE: the layout is wire format — field order and __packed are part of the
 * eMMC protocol contract and must not be changed.
 */
struct rpmb_frame {
u8 stuff[196];
u8 key_mac[32];
u8 data[256];
u8 nonce[16];
__be32 write_counter;
__be16 addr;
__be16 block_count;
__be16 result;
__be16 req_resp;
} __packed;
#define RPMB_PROGRAM_KEY 0x1 /* Program RPMB Authentication Key */ #define RPMB_GET_WRITE_COUNTER 0x2 /* Read RPMB write counter */ #define RPMB_WRITE_DATA 0x3 /* Write data to RPMB partition */ #define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */ #define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */
static DEFINE_MUTEX(block_mutex);
/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device. It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;
/* * Only set in main mmc_blk_data associated * with mmc_card with dev_set_drvdata, and keeps * track of the current selected device partition.
*/ unsignedint part_curr; #define MMC_BLK_PART_INVALID UINT_MAX /* Unknown partition active */ int area_type;
/* debugfs files (only in main mmc_blk_data) */ struct dentry *status_dentry; struct dentry *ext_csd_dentry;
};
/* Device type for RPMB character devices */ static dev_t mmc_rpmb_devt;
/* Bus type for RPMB character devices */ staticconststruct bus_type mmc_rpmb_bus_type = {
.name = "mmc_rpmb",
};
/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @rdev: registered RPMB device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
	struct device dev;
	struct cdev chrdev;
	int id;
	unsigned int part_index;
	struct mmc_blk_data *md;
	struct rpmb_dev *rdev;
	struct list_head node;
};
static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
prev_idata = idatas[i - 1];
/* * The RPMB accesses comes in from the character device, so we * need to target these explicitly. Else we just target the * partition type for the block device the ioctl() was issued * on.
*/ if (idata->rpmb) { /* Support multiple RPMB partitions */
target_part = idata->rpmb->part_index;
target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
} else {
target_part = md->part_type;
}
if (idata->ic.write_flag)
data.flags = MMC_DATA_WRITE; else
data.flags = MMC_DATA_READ;
/* data.flags must already be set before doing this. */
mmc_set_data_timeout(&data, card);
/* Allow overriding the timeout_ns for empirical tuning. */ if (idata->ic.data_timeout_ns)
data.timeout_ns = idata->ic.data_timeout_ns;
mrq.data = &data;
}
mrq.cmd = &cmd;
err = mmc_blk_part_switch(card, target_part); if (err) return err;
if (idata->ic.is_acmd) {
err = mmc_app_cmd(card->host, card); if (err) return err;
}
if (idata->rpmb || prev_idata) {
sbc.opcode = MMC_SET_BLOCK_COUNT; /* * We don't do any blockcount validation because the max size * may be increased by a future standard. We just copy the * 'Reliable Write' bit here.
*/
sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); if (prev_idata)
sbc.arg = prev_idata->ic.arg;
sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
mrq.sbc = &sbc;
}
/* If it's an R1B response we need some more preparations. */
busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B; if (r1b_resp)
mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
if (prev_idata) {
memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp)); if (sbc.error) {
dev_err(mmc_dev(card->host), "%s: sbc error %d\n",
__func__, sbc.error); return sbc.error;
}
}
if (cmd.error) {
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
__func__, cmd.error); return cmd.error;
} if (data.error) {
dev_err(mmc_dev(card->host), "%s: data error %d\n",
__func__, data.error); return data.error;
}
/* * Make sure the cache of the PARTITION_CONFIG register and * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write * changed it successfully.
*/ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
(cmd.opcode == MMC_SWITCH)) { struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);
/* * Update cache so the next mmc_blk_part_switch call operates * on up-to-date data.
*/
card->ext_csd.part_config = value;
main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
}
/* * Make sure to update CACHE_CTRL in case it was changed. The cache * will get turned back on if the card is re-initialized, e.g. * suspend/resume or hw reset in recovery.
*/ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
(cmd.opcode == MMC_SWITCH)) {
u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
card->ext_csd.cache_ctrl = value;
}
/* * According to the SD specs, some commands require a delay after * issuing the command.
*/ if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
if (mmc_host_is_spi(card->host)) { if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY) return mmc_spi_err_check(card); return err;
}
/* * Ensure RPMB, writes and R1B responses are completed by polling with * CMD13. Note that, usually we don't need to poll when using HW busy * detection, but here it's needed since some commands may indicate the * error through the R1 status bits.
*/ if (idata->rpmb || idata->ic.write_flag || r1b_resp) { struct mmc_blk_busy_data cb_data = {
.card = card,
};
idata = mmc_blk_ioctl_copy_from_user(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); /* This will be NULL on non-RPMB ioctl():s */
idata->rpmb = rpmb;
if (copy_from_user(&num_of_cmds, &user->num_of_cmds, sizeof(num_of_cmds))) return -EFAULT;
if (!num_of_cmds) return 0;
if (num_of_cmds > MMC_IOC_MAX_CMDS) return -EINVAL;
n = num_of_cmds;
idata = kcalloc(n, sizeof(*idata), GFP_KERNEL); if (!idata) return -ENOMEM;
for (i = 0; i < n; i++) {
idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]); if (IS_ERR(idata[i])) {
err = PTR_ERR(idata[i]);
n = i; goto cmd_err;
} /* This will be NULL on non-RPMB ioctl():s */
idata[i]->rpmb = rpmb;
}
/* copy to user if data and response */ for (i = 0; i < n && !err; i++)
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
blk_mq_free_request(req);
cmd_err: for (i = 0; i < n; i++) {
kfree(idata[i]->buf);
kfree(idata[i]);
}
kfree(idata); return ioc_err ? ioc_err : err;
}
/*
 * mmc_blk_check_blkdev() - gate raw-access ioctls on a block device.
 *
 * Returns 0 when the ioctl may proceed, -EPERM otherwise.
 */
static int mmc_blk_check_blkdev(struct block_device *bdev)
{
	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition. This prevents overspray
	 * between sibling partitions.
	 */
	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
		return -EPERM;
	return 0;
}
if (mmc_card_ult_capacity(card)) { /* * Normally, ACMD22 returns the number of written sectors as * u32. SDUC, however, returns it as u64. This is not a * superfluous requirement, because SDUC writes may exceed 2TB. * For Linux mmc however, the previously write operation could * not be more than the block layer limits, thus just make room * for a u64 and cast the response back to u32.
*/
result = clamp_val(get_unaligned_be64(blocks), 0, UINT_MAX);
} else {
result = ntohl(*blocks);
}
kfree(blocks);
if (data->timeout_clks) {
khz = mmc_blk_clock_khz(host);
ms += DIV_ROUND_UP(data->timeout_clks, khz);
}
return ms;
}
/*
 * Attempts to reset the card and get back to the requested partition.
 * Therefore any error here must result in cancelling the block layer
 * request, it must not be reattempted without going through the mmc_blk
 * partition sanity checks.
 *
 * Returns 0 on success, -EEXIST if a reset of this @type is already done
 * (and not yet cleared), -ENODEV if the partition cannot be re-selected,
 * or the error from mmc_hw_reset().
 */
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;
	struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);

	/* Only one reset attempt per error type until it is marked done. */
	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host->card);
	/*
	 * A successful reset will leave the card in the main partition, but
	 * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
	 * in that case.
	 */
	main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
	if (err)
		return err;
	/* Ensure we switch back to the correct partition */
	if (mmc_blk_part_switch(host->card, md->part_type))
		/*
		 * We have failed to get back into the correct
		 * partition, so we need to abort the whole request.
		 */
		return -ENODEV;
	return 0;
}
for (i = 1; i < mq_rq->ioc_count; i++) { if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT &&
mmc_op_multi(idata[i]->ic.opcode)) {
idata[i - 1]->flags |= MMC_BLK_IOC_DROP;
idata[i]->flags |= MMC_BLK_IOC_SBC;
}
}
}
/* * The non-block commands come back from the block layer after it queued it and * processed it with all other requests and then they get issued in this * function.
*/ staticvoid mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{ struct mmc_queue_req *mq_rq; struct mmc_card *card = mq->card; struct mmc_blk_data *md = mq->blkdata; struct mmc_blk_ioc_data **idata; bool rpmb_ioctl;
u8 **ext_csd;
u32 status; int ret; int i;
switch (mq_rq->drv_op) { case MMC_DRV_OP_IOCTL: if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card); if (ret) break;
}
mmc_blk_check_sbc(mq_rq);
fallthrough; case MMC_DRV_OP_IOCTL_RPMB:
idata = mq_rq->drv_op_data; for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
ret = __mmc_blk_ioctl_cmd(card, md, idata, i); if (ret) break;
} /* Always switch back to main area after RPMB access */ if (rpmb_ioctl)
mmc_blk_part_switch(card, 0); elseif (card->reenable_cmdq && !card->ext_csd.cmdq_en)
mmc_cmdq_enable(card); break; case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
card->ext_csd.boot_ro_lock |
EXT_CSD_BOOT_WP_B_PWR_WP_EN,
card->ext_csd.part_time); if (ret)
pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
md->disk->disk_name, ret); else
card->ext_csd.boot_ro_lock |=
EXT_CSD_BOOT_WP_B_PWR_WP_EN; break; case MMC_DRV_OP_GET_CARD_STATUS:
ret = mmc_send_status(card, &status); if (!ret)
ret = status; break; case MMC_DRV_OP_GET_EXT_CSD:
ext_csd = mq_rq->drv_op_data;
ret = mmc_get_ext_csd(card, ext_csd); break; default:
pr_err("%s: unknown driver specific operation\n",
md->disk->disk_name);
ret = -EINVAL; break;
}
mq_rq->drv_op_result = ret;
blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
ret = mmc_flush_cache(card->host);
blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/* * Reformat current write as a reliable write, supporting * both legacy and the enhanced reliable write MMC cards. * In each transfer we'll handle only as much as a single * reliable write can handle, thus finish the request in * partial completions.
*/ staticinlinevoid mmc_apply_rel_rw(struct mmc_blk_request *brq, struct mmc_card *card, struct request *req)
{ if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { /* Legacy mode imposes restrictions on transfers. */ if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
brq->data.blocks = 1;
/* * Per the SD specification(physical layer version 4.10)[1], * section 4.3.3, it explicitly states that "When the last * block of user area is read using CMD18, the host should * ignore OUT_OF_RANGE error that may occur even the sequence * is correct". And JESD84-B51 for eMMC also has a similar * statement on section 6.8.3. * * Multiple block read/write could be done by either predefined * method, namely CMD23, or open-ending mode. For open-ending mode, * we should ignore the OUT_OF_RANGE error as it's normal behaviour. * * However the spec[1] doesn't tell us whether we should also * ignore that for predefined method. But per the spec[1], section * 4.15 Set Block Count Command, it says"If illegal block count * is set, out of range error will be indicated during read/write * operation (For example, data transfer is stopped at user area * boundary)." In another word, we could expect a out of range error * in the response for the following CMD18/25. And if argument of * CMD23 + the argument of CMD18/25 exceed the max number of blocks, * we could also expect to get a -ETIMEDOUT or any error number from * the host drivers due to missing data response(for write)/data(for * read), as the cards will stop the data transfer by itself per the * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
*/
if (!brq->stop.error) { bool oor_with_open_end; /* If there is no error yet, check R1 response */
val = brq->stop.resp[0] & CMD_ERRORS;
oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
if (val && !oor_with_open_end)
brq->stop.error = -EIO;
}
}
/* * Reliable writes are used to implement Forced Unit Access and * are supported only on MMCs.
*/
do_rel_wr = (req->cmd_flags & REQ_FUA) &&
rq_data_dir(req) == WRITE &&
(md->flags & MMC_BLK_REL_WR);
/* * The command queue supports 2 priorities: "high" (1) and "simple" (0). * The eMMC will give "high" priority tasks priority over "simple" * priority tasks. Here we always set "simple" priority by not setting * MMC_DATA_PRIO.
*/
/* * The block layer doesn't support all sector count * restrictions, so we need to be prepared for too big * requests.
*/ if (brq->data.blocks > card->host->max_blk_count)
brq->data.blocks = card->host->max_blk_count;
if (brq->data.blocks > 1) { /* * Some SD cards in SPI mode return a CRC error or even lock up * completely when trying to read the last block using a * multiblock read command.
*/ if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
(blk_rq_pos(req) + blk_rq_sectors(req) ==
get_capacity(md->disk)))
brq->data.blocks--;
/* * After a read error, we redo the request one (native) sector * at a time in order to accurately determine which * sectors can be read successfully.
*/ if (recovery_mode)
brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
/* * Some controllers have HW issues while operating * in multiple I/O mode
*/ if (card->host->ops->multi_io_quirk)
brq->data.blocks = card->host->ops->multi_io_quirk(card,
(rq_data_dir(req) == READ) ?
MMC_DATA_READ : MMC_DATA_WRITE,
brq->data.blocks);
}
if (do_rel_wr) {
mmc_apply_rel_rw(brq, card, req);
brq->data.flags |= MMC_DATA_REL_WR;
}
/* * Data tag is used only during writing meta data to speed * up write and any subsequent read of this meta data
*/
do_data_tag = card->ext_csd.data_tag_unit_size &&
(req->cmd_flags & REQ_META) &&
(rq_data_dir(req) == WRITE) &&
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
if (do_data_tag)
brq->data.flags |= MMC_DATA_DAT_TAG;
/* * Adjust the sg list so it is the same size as the * request.
*/ if (brq->data.blocks != blk_rq_sectors(req)) { int i, data_size = brq->data.blocks << 9; struct scatterlist *sg;
/* * Block layer timeouts race with completions which means the normal * completion path cannot be used during recovery.
*/ if (mq->in_recovery)
mmc_blk_cqe_complete_rq(mq, req); elseif (likely(!blk_should_fake_timeout(req->q)))
blk_mq_complete_request(req);
}
if (brq->data.blocks > 1 || do_rel_wr) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request.
*/ if (!mmc_host_is_spi(card->host) ||
rq_data_dir(req) == READ)
brq->mrq.stop = &brq->stop;
readcmd = MMC_READ_MULTIPLE_BLOCK;
writecmd = MMC_WRITE_MULTIPLE_BLOCK;
} else {
brq->mrq.stop = NULL;
readcmd = MMC_READ_SINGLE_BLOCK;
writecmd = MMC_WRITE_BLOCK;
}
brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;
/* * Pre-defined multi-block transfers are preferable to * open ended-ones (and necessary for reliable writes). * However, it is not sufficient to just send CMD23, * and avoid the final CMD12, as on an error condition * CMD12 (stop) needs to be sent anyway. This, coupled * with Auto-CMD23 enhancements provided by some * hosts, means that the complexity of dealing * with this is best left to the host. If CMD23 is * supported by card and host, we'll fill sbc in and let * the host deal with handling it correctly. This means * that for hosts that don't expose MMC_CAP_CMD23, no * change of behavior will be observed. * * N.B: Some MMC cards experience perf degradation. * We'll avoid using CMD23-bounded multiblock writes for * these, while retaining features like reliable writes.
*/ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
(do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
do_data_tag)) {
brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
brq->sbc.arg = brq->data.blocks |
(do_rel_wr ? (1 << 31) : 0) |
(do_data_tag ? (1 << 29) : 0);
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
/* * Requests are completed by mmc_blk_mq_complete_rq() which sets simple * policy: * 1. A request that has transferred at least some data is considered * successful and will be requeued if there is remaining data to * transfer. * 2. Otherwise the number of retries is incremented and the request * will be requeued if there are remaining retries. * 3. Otherwise the request will be errored out. * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and * mqrq->retries. So there are only 4 possible actions here: * 1. do not accept the bytes_xfered value i.e. set it to zero * 2. change mqrq->retries to determine the number of retries * 3. try to reset the card * 4. read one sector at a time
*/ staticvoid mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_blk_request *brq = &mqrq->brq; struct mmc_blk_data *md = mq->blkdata; struct mmc_card *card = mq->card;
u32 status;
u32 blocks; int err;
/* * Some errors the host driver might not have seen. Set the number of * bytes transferred to zero in that case.
*/
err = __mmc_send_status(card, &status, 0); if (err || mmc_blk_status_error(req, status))
brq->data.bytes_xfered = 0;
mmc_retune_release(card->host);
/* * Try again to get the status. This also provides an opportunity for * re-tuning.
*/ if (err)
err = __mmc_send_status(card, &status, 0);
/* * Nothing more to do after the number of bytes transferred has been * updated and there is no card.
*/ if (err && mmc_detect_card_removed(card->host)) return;
/* Try to get back to "tran" state */ if (!mmc_host_is_spi(mq->card->host) &&
(err || !mmc_ready_for_data(status)))
err = mmc_blk_fix_state(mq->card, req);
/* * Special case for SD cards where the card might record the number of * blocks written.
*/ if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
rq_data_dir(req) == WRITE) { if (mmc_sd_num_wr_blocks(card, &blocks))
brq->data.bytes_xfered = 0; else
brq->data.bytes_xfered = blocks << 9;
}
/* Reset if the card is in a bad state */ if (!mmc_host_is_spi(mq->card->host) &&
err && mmc_blk_reset(md, card->host, type)) {
pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
mqrq->retries = MMC_NO_RETRIES; return;
}
/* * If anything was done, just return and if there is anything remaining * on the request it will get requeued.
*/ if (brq->data.bytes_xfered) return;
/* Reset before last retry */ if (mqrq->retries + 1 == MMC_MAX_RETRIES &&
mmc_blk_reset(md, card->host, type)) return;
/* Command errors fail fast, so use all MMC_MAX_RETRIES */ if (brq->sbc.error || brq->cmd.error) return;
/* Reduce the remaining retries for data errors */ if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; return;
}
if (rq_data_dir(req) == READ && brq->data.blocks >
queue_physical_block_size(mq->queue) >> 9) { /* Read one (native) sector at a time */
mmc_blk_read_single(mq, req); return;
}
}
/*
 * mmc_spi_err_check() - verify that an SPI-mode card completed successfully.
 *
 * Returns 0 on success, -EIO if the card reports any R1/R2 error bits, or
 * the error from the status query itself.
 */
static int mmc_spi_err_check(struct mmc_card *card)
{
	u32 status = 0;
	int err;

	/*
	 * SPI does not have a TRAN state we have to wait on, instead the
	 * card is ready again when it no longer holds the line LOW.
	 * We still have to ensure two things here before we know the write
	 * was successful:
	 * 1. The card has not disconnected during busy and we actually read our
	 *    own pull-up, thinking it was still connected, so ensure it
	 *    still responds.
	 * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
	 *    just reconnected card after being disconnected during busy.
	 */
	err = __mmc_send_status(card, &status, 0);
	if (err)
		return err;
	/* All R1 and R2 bits of SPI are errors in our case */
	if (status)
		return -EIO;
	return 0;
}
/*
 * mmc_blk_busy_cb() - polling callback used with __mmc_poll_for_busy().
 *
 * NOTE(review): the previous body dereferenced "cb_data.status" (cb_data is
 * a void pointer) and referenced "mqrq", which is not in scope here, and it
 * never assigned the @busy out-parameter — apparently fragments of the
 * caller's post-poll error handling were merged into this function.
 * Restored to a coherent callback: record the latest card status for the
 * caller and report busy until the card is ready for data again.
 */
static int mmc_blk_busy_cb(void *cb_data, bool *busy)
{
	struct mmc_blk_busy_data *data = cb_data;
	u32 status = 0;
	int err;

	err = mmc_send_status(data->card, &status);
	if (err)
		return err;

	/* Keep the last status so the caller can inspect error bits. */
	data->status = status;
	/* Busy until the card has returned to the transfer state. */
	*busy = !mmc_ready_for_data(status);
	return err;
}
/*
 * mmc_blk_rw_reset_success() - mark a read/write reset type as recovered.
 *
 * NOTE(review): the previous body computed @type and then never used it,
 * instead duplicating the recovery-completion logic that already appears in
 * the CQE completion path — apparently a garbled merge of two functions.
 * Restored so that a successful request clears the reset-done state for its
 * direction, allowing mmc_blk_reset() to act again on a future error.
 */
static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
					    struct request *req)
{
	int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;

	mmc_blk_reset_success(mq->blkdata, type);
}
/* * Block layer timeouts race with completions which means the normal * completion path cannot be used during recovery.
*/ if (mq->in_recovery) {
mmc_blk_mq_complete_rq(mq, req);
} elseif (likely(!blk_should_fake_timeout(req->q))) { if (can_sleep)
blk_mq_complete_request_direct(req, mmc_blk_mq_complete); else
blk_mq_complete_request(req);
}
if (!mmc_host_can_done_complete(host)) { bool waiting;
/* * We cannot complete the request in this context, so record * that there is a request to complete, and that a following * request does not need to wait (although it does need to * complete complete_req first).
*/
spin_lock_irqsave(&mq->lock, flags);
mq->complete_req = req;
mq->rw_wait = false;
waiting = mq->waiting;
spin_unlock_irqrestore(&mq->lock, flags);
/* * If 'waiting' then the waiting task will complete this * request, otherwise queue a work to do it. Note that * complete_work may still race with the dispatch of a following * request.
*/ if (waiting)
wake_up(&mq->wait); else
queue_work(mq->card->complete_wq, &mq->complete_work);
return;
}
/* Take the recovery path for errors or urgent background operations */ if (mmc_blk_rq_error(&mqrq->brq) ||
mmc_blk_urgent_bkops_needed(mq, mqrq)) {
spin_lock_irqsave(&mq->lock, flags);
mq->recovery_needed = true;
mq->recovery_req = req;
spin_unlock_irqrestore(&mq->lock, flags);
wake_up(&mq->wait);
schedule_work(&mq->recovery_work); return;
}
/* * Wait while there is another request in progress, but not if recovery * is needed. Also indicate whether there is a request waiting to start.
*/
spin_lock_irqsave(&mq->lock, flags); if (mq->recovery_needed) {
*err = -EBUSY;
done = true;
} else {
done = !mq->rw_wait;
}
mq->waiting = !done;
spin_unlock_irqrestore(&mq->lock, flags);
/*
 * NOTE(review): the following trailing text is an unrelated German website
 * disclaimer that leaked into this file and is not part of the driver; it is
 * preserved here (translated) inside a comment so it no longer breaks
 * compilation, and should simply be removed:
 *
 *   "The information on this web page was carefully compiled to the best of
 *    our knowledge. However, neither completeness, nor correctness, nor
 *    quality of the provided information is guaranteed.
 *    Note: the coloured syntax rendering and the measurement are still
 *    experimental."
 */