/* Common for multiple and single block requests */ struct usdhi6_page pg; /* current page from an SG */ void *blk_page; /* either a mapped page, or the bounce buffer */
size_t offset; /* offset within a page, including sg->offset */
if (host->io_error &
(USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54); int opc = host->mrq ? host->mrq->cmd->opcode : -1;
err = usdhi6_read(host, USDHI6_SD_ERR_STS2); /* Response timeout is often normal, don't spam the log */ if (host->wait == USDHI6_WAIT_FOR_CMD)
dev_dbg(mmc_dev(host->mmc), "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
err, rsp54, host->wait, opc); else
dev_warn(mmc_dev(host->mmc), "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
err, rsp54, host->wait, opc); return -ETIMEDOUT;
}
err = usdhi6_read(host, USDHI6_SD_ERR_STS1); if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1); if (host->io_error & USDHI6_SD_INFO2_ILA) return -EILSEQ;
return -EIO;
}
/* Scatter-Gather management */
/*
 * In PIO mode we have to map each page separately, using kmap(). That way
 * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
 * have to use a bounce buffer for blocks, crossing page boundaries. Such blocks
 * have been observed with an SDIO WiFi card (b43 driver).
 */
static void usdhi6_blk_bounce(struct usdhi6_host *host,
			      struct scatterlist *sg)
{
	struct mmc_data *data = host->mrq->data;

	/*
	 * The caller detected a block that crosses a page boundary: rewind to
	 * the head of the current SG list and its intra-page offset.
	 * NOTE(review): upstream versions of this helper also kmap() the
	 * following page and assemble the split block in a bounce buffer; that
	 * logic is absent here — confirm against the complete driver source.
	 */
	host->sg = data->sg;	/* TODO: if we always map, this is redundant */
	host->offset = host->sg->offset;
}
/* Map the first page in an SG segment: common for multiple and single block IO */ staticvoid *usdhi6_sg_map(struct usdhi6_host *host)
{ struct mmc_data *data = host->mrq->data; struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
size_t head = PAGE_SIZE - sg->offset;
size_t blk_head = head % data->blksz;
WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page); if (WARN(sg_dma_len(sg) % data->blksz, "SG size %u isn't a multiple of block size %u\n",
sg_dma_len(sg), data->blksz)) return NULL;
/* * Block size must be a power of 2 for multi-block transfers, * therefore blk_head is equal for all pages in this SG
*/
host->head_len = blk_head;
if (head < data->blksz) /* * The first block in the SG crosses a page boundary. * Max blksz = 512, so blocks can only span 2 pages
*/
usdhi6_blk_bounce(host, sg); else
host->blk_page = host->pg.mapped;
dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
sg->offset, host->mrq->cmd->opcode, host->mrq);
return host->blk_page + host->offset;
}
/* Unmap the current page: common for multiple and single block IO */
static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
{
	struct mmc_data *data = host->mrq->data;
	struct page *page = host->head_pg.page;

	if (page) {
		/*
		 * Fix: the original body used 'sg' and 'blk_head' without
		 * declaring them and had an unbalanced closing brace — the
		 * enclosing "previous block crossed a page boundary" branch
		 * had been lost. Reconstructed here: release the head page,
		 * then decide whether the following page may be unmapped.
		 * NOTE(review): upstream also copies bounced read data back
		 * into the mapped pages at this point — confirm against the
		 * complete driver before relying on cross-page reads.
		 */
		struct scatterlist *sg = data->sg_len > 1 ?
			host->sg : data->sg;
		size_t blk_head = host->head_len;

		flush_dcache_page(page);
		kunmap(page);
		host->head_pg.page = NULL;

		if (!force && sg_dma_len(sg) + sg->offset >
		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
			/* More blocks in this SG, don't unmap the next page */
			return;
	}

	page = host->pg.page;
	if (!page)
		return;

	flush_dcache_page(page);
	kunmap(page);

	host->pg.page = NULL;
}
/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */ staticvoid usdhi6_sg_advance(struct usdhi6_host *host)
{ struct mmc_data *data = host->mrq->data;
size_t done, total;
/* New offset: set at the end of the previous block */ if (host->head_pg.page) { /* Finished a cross-page block, jump to the new page */
host->page_idx++;
host->offset = data->blksz - host->head_len;
host->blk_page = host->pg.mapped;
usdhi6_sg_unmap(host, false);
} else {
host->offset += data->blksz; /* The completed block didn't cross a page boundary */ if (host->offset == PAGE_SIZE) { /* If required, we'll map the page below */
host->offset = 0;
host->page_idx++;
}
}
/* * Now host->blk_page + host->offset point at the end of our last block * and host->page_idx is the index of the page, in which our new block * is located, if any
*/
if (done < total && host->offset) { /* More blocks in this page */ if (host->offset + data->blksz > PAGE_SIZE) /* We approached at a block, that spans 2 pages */
usdhi6_blk_bounce(host, host->sg);
return;
}
/* Finished current page or an SG segment */
usdhi6_sg_unmap(host, false);
if (done == total) { /* * End of an SG segment or the complete SG: jump to the next * segment, we'll map it later in usdhi6_blk_read() or * usdhi6_blk_write()
*/ struct scatterlist *next = sg_next(host->sg);
host->page_idx = 0;
if (!next)
host->wait = USDHI6_WAIT_FOR_DATA_END;
host->sg = next;
if (WARN(next && sg_dma_len(next) % data->blksz, "SG size %u isn't a multiple of block size %u\n",
sg_dma_len(next), data->blksz))
data->error = -EINVAL;
return;
}
/* We cannot get here after crossing a page border */
/* Next page in the same SG */
host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
host->pg.mapped = kmap(host->pg.page);
host->blk_page = host->pg.mapped;
dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
host->mrq->cmd->opcode, host->mrq);
}
if (cookie < 0) { /* DMA failed, fall back to PIO */ if (ret >= 0)
ret = cookie;
usdhi6_dma_release(host);
dev_warn(mmc_dev(host->mmc), "DMA failed: %d, falling back to PIO\n", ret);
}
if (host->io_error) {
data->error = usdhi6_error_code(host);
data->bytes_xfered = 0;
usdhi6_dma_kill(host);
usdhi6_dma_release(host);
dev_warn(mmc_dev(host->mmc), "DMA failed: %d, falling back to PIO\n", data->error); return;
}
/* * The datasheet tells us to check a response from the card, whereas * responses only come after the command phase, not after the data * phase. Let's check anyway.
*/ if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
}
for (i = 1000; i; i--) { if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN) break;
usleep_range(10, 100);
}
if (!i) {
dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n"); return;
}
val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;
if (rate) { unsignedlong new_rate;
if (host->imclk <= rate) { if (ios->timing != MMC_TIMING_UHS_DDR50) { /* Cannot have 1-to-1 clock in DDR mode */
new_rate = host->imclk;
val |= 0xff;
} else {
new_rate = host->imclk / 2;
}
} else { unsignedlong div =
roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
val |= div >> 2;
new_rate = host->imclk / div;
}
if (host->rate == new_rate) return;
host->rate = new_rate;
dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
rate, (val & 0xff) << 2, new_rate);
}
/* * if old or new rate is equal to input rate, have to switch the clock * off before changing and on after
*/ if (host->imclk == rate || host->imclk == host->rate || !rate)
usdhi6_write(host, USDHI6_SD_CLK_CTRL,
val & ~USDHI6_SD_CLK_CTRL_SCLKEN);
if (!rate) {
host->rate = 0; return;
}
usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);
if (host->imclk == rate || host->imclk == host->rate ||
!(val & USDHI6_SD_CLK_CTRL_SCLKEN))
usdhi6_write(host, USDHI6_SD_CLK_CTRL,
val | USDHI6_SD_CLK_CTRL_SCLKEN);
}
switch (ios->power_mode) { case MMC_POWER_OFF:
usdhi6_set_power(host, ios);
usdhi6_only_cd(host); break; case MMC_POWER_UP: /* * We only also touch USDHI6_SD_OPTION from .request(), which * cannot race with MMC_POWER_UP
*/
ret = usdhi6_reset(host); if (ret < 0) {
dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
} else {
usdhi6_set_power(host, ios);
usdhi6_only_cd(host);
} break; case MMC_POWER_ON:
option = usdhi6_read(host, USDHI6_SD_OPTION); /* * The eMMC standard only allows 4 or 8 bits in the DDR mode, * the same probably holds for SD cards. We check here anyway, * since the datasheet explicitly requires 4 bits for DDR.
*/ if (ios->bus_width == MMC_BUS_WIDTH_1) { if (ios->timing == MMC_TIMING_UHS_DDR50)
dev_err(mmc_dev(mmc), "4 bits are required for DDR\n");
option |= USDHI6_SD_OPTION_WIDTH_1;
mode = 0;
} else {
option &= ~USDHI6_SD_OPTION_WIDTH_1;
mode = ios->timing == MMC_TIMING_UHS_DDR50;
}
usdhi6_write(host, USDHI6_SD_OPTION, option);
usdhi6_write(host, USDHI6_SDIF_MODE, mode); break;
}
if (host->rate != ios->clock)
usdhi6_clk_set(host, ios);
}
/* This is data timeout. Response timeout is fixed to 640 clock cycles */ staticvoid usdhi6_timeout_set(struct usdhi6_host *host)
{ struct mmc_request *mrq = host->mrq;
u32 val; unsignedlong ticks;
/* SEC bit is required to enable block counting by the core */
usdhi6_write(host, USDHI6_SD_STOP,
data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
/*
 * Switch the card's signalling voltage: delegate the regulator change to the
 * MMC core, then reconfigure the host's pin states to match.
 * Returns 0 on success or a negative error code. A pin-state failure is
 * reported (once) and propagated, but the regulator change stays in effect.
 */
static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret;

	/* Fix: 'staticint' (missing space) did not compile */
	ret = mmc_regulator_set_vqmmc(mmc, ios);
	if (ret < 0)
		return ret;

	ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
	if (ret)
		dev_warn_once(mmc_dev(mmc),
			      "Failed to set pinstate err=%d\n", ret);
	return ret;
}
staticbool usdhi6_read_block(struct usdhi6_host *host)
{ /* ACCESS_END IRQ is already unmasked */ int ret = usdhi6_blk_read(host);
/* * Have to force unmapping both pages: the single block could have been * cross-page, in which case for single-block IO host->page_idx == 0. * So, if we don't force, the second page won't be unmapped.
*/
usdhi6_sg_unmap(host, true);
switch (host->wait) { case USDHI6_WAIT_FOR_REQUEST: /* We're too late, the timeout has already kicked in */ return IRQ_HANDLED; case USDHI6_WAIT_FOR_CMD: /* Wait for data? */
io_wait = usdhi6_end_cmd(host); break; case USDHI6_WAIT_FOR_MREAD: /* Wait for more data? */
io_wait = usdhi6_mread_block(host); break; case USDHI6_WAIT_FOR_READ: /* Wait for data end? */
io_wait = usdhi6_read_block(host); break; case USDHI6_WAIT_FOR_MWRITE: /* Wait data to write? */
io_wait = usdhi6_mwrite_block(host); break; case USDHI6_WAIT_FOR_WRITE: /* Wait for data end? */
io_wait = usdhi6_write_block(host); break; case USDHI6_WAIT_FOR_DMA:
usdhi6_dma_check_error(host); break; case USDHI6_WAIT_FOR_STOP:
usdhi6_write(host, USDHI6_SD_STOP, 0); if (host->io_error) { int ret = usdhi6_error_code(host); if (mrq->stop)
mrq->stop->error = ret; else
mrq->data->error = ret;
dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret); break;
}
usdhi6_resp_cmd12(host);
mrq->stop->error = 0; break; case USDHI6_WAIT_FOR_DATA_END: if (host->io_error) {
mrq->data->error = usdhi6_error_code(host);
dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
mrq->data->error);
} break; default:
cmd->error = -EFAULT;
dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
usdhi6_request_done(host); return IRQ_HANDLED;
}
if (io_wait) {
schedule_delayed_work(&host->timeout_work, host->timeout); /* Wait for more data or ACCESS_END */ if (!host->dma_active)
usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); return IRQ_HANDLED;
}
if (!cmd->error) { if (data) { if (!data->error) { if (host->wait != USDHI6_WAIT_FOR_STOP &&
host->mrq->stop &&
!host->mrq->stop->error &&
!usdhi6_stop_cmd(host)) { /* Sending STOP */
usdhi6_wait_for_resp(host);
/* * Actually this should not be needed, if the built-in timeout works reliably in * the both PIO cases and DMA never fails. But if DMA does fail, a timeout * handler might be the only way to catch the error.
*/ staticvoid usdhi6_timeout_work(struct work_struct *work)
{ struct delayed_work *d = to_delayed_work(work); struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); struct mmc_request *mrq = host->mrq; struct mmc_data *data = mrq ? mrq->data : NULL; struct scatterlist *sg;
mmc = devm_mmc_alloc_host(dev, sizeof(*host)); if (!mmc) return -ENOMEM;
ret = mmc_regulator_get_supply(mmc); if (ret) return ret;
ret = mmc_of_parse(mmc); if (ret < 0) return ret;
host = mmc_priv(mmc);
host->mmc = mmc;
host->wait = USDHI6_WAIT_FOR_REQUEST;
host->timeout = msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS); /* * We use a fixed timeout of 4s, hence inform the core about it. A * future improvement should instead respect the cmd->busy_timeout.
*/
mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS;
host->pinctrl = devm_pinctrl_get(&pdev->dev); if (IS_ERR(host->pinctrl)) return PTR_ERR(host->pinctrl);
mmc->ops = &usdhi6_ops;
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_SDIO_IRQ; /* Set .max_segs to some random number. Feel free to adjust. */
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; /* * Setting .max_seg_size to 1 page would simplify our page-mapping code, * But OTOH, having large segments makes DMA more efficient. We could * check, whether we managed to get DMA and fall back to 1 page * segments, but if we do manage to obtain DMA and then it fails at * run-time and we fall back to PIO, we will continue getting large * segments. So, we wouldn't be able to get rid of the code anyway.
*/
mmc->max_seg_size = mmc->max_req_size; if (!mmc->f_max)
mmc->f_max = host->imclk;
mmc->f_min = host->imclk / 512;
platform_set_drvdata(pdev, host);
ret = mmc_add_host(mmc); if (ret < 0) goto e_release_dma;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.