/*
 * NOTE(review): the lines below are fragments of several dw_mmc host-driver
 * functions (command preparation, CMD11 voltage-switch setup, command start,
 * and EDMAC read completion). Original formatting was lost during extraction;
 * code is kept byte-identical. TODO: restore from upstream
 * drivers/mmc/host/dw_mmc.c.
 */
/* * Databook says that before issuing a new data transfer command * we need to check to see if the card is busy. Data transfer commands * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. * * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is * expected.
*/ if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
!(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
status,
!(status & SDMMC_STATUS_BUSY),
10, 500 * USEC_PER_MSEC))
dev_err(host->dev, "Busy; trying anyway\n");
}
}
/*
 * CMD11 (SD_SWITCH_VOLTAGE): disable low-power clock gating and update the
 * card clock before the UHS voltage-switch sequence starts.
 */
if (cmd->opcode == SD_SWITCH_VOLTAGE) {
u32 clk_en_a;
/* Special bit makes CMD11 not die */
cmdr |= SDMMC_CMD_VOLT_SWITCH;
/* Change state to continue to handle CMD11 weirdness */
WARN_ON(slot->host->state != STATE_SENDING_CMD);
slot->host->state = STATE_SENDING_CMD11;
/* * We need to disable low power mode (automatic clock stop) * while doing voltage switch so we don't confuse the card, * since stopping the clock is a specific part of the UHS * voltage change dance. * * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be * unconditionally turned back on in dw_mci_setup_bus() if it's * ever called with a non-zero clock. That shouldn't happen * until the voltage change is all done.
*/
clk_en_a = mci_readl(host, CLKENA);
clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
mci_writel(host, CLKENA, clk_en_a);
mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
SDMMC_CMD_PRV_DAT_WAIT, 0);
}
/* Translate the mmc_command response/data flags into SDMMC_CMD_* bits. */
if (cmd->flags & MMC_RSP_PRESENT) { /* We expect a response, so set this bit */
cmdr |= SDMMC_CMD_RESP_EXP; if (cmd->flags & MMC_RSP_136)
cmdr |= SDMMC_CMD_RESP_LONG;
}
if (cmd->flags & MMC_RSP_CRC)
cmdr |= SDMMC_CMD_RESP_CRC;
if (cmd->data) {
cmdr |= SDMMC_CMD_DAT_EXP; if (cmd->data->flags & MMC_DATA_WRITE)
cmdr |= SDMMC_CMD_DAT_WR;
}
if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
cmdr |= SDMMC_CMD_USE_HOLD_REG;
/* * The durations we're working with are fairly short so we have to be * extra careful about synchronization here. Specifically in hardware a * command timeout is _at most_ 5.1 ms, so that means we expect an * interrupt (either command done or timeout) to come rather quickly * after the mci_writel. ...but just in case we have a long interrupt * latency let's add a bit of paranoia. * * In general we'll assume that at least an interrupt will be asserted * in hardware by the time the cto_timer runs. ...and if it hasn't * been asserted in hardware by that time then we'll assume it'll never * come.
*/
spin_lock_irqsave(&host->irq_lock, irqflags); if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
mod_timer(&host->cto_timer,
jiffies + msecs_to_jiffies(cto_ms) + 1);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
/*
 * EDMAC read completion: sync the scatterlist back to the CPU before the
 * upper layer touches the data.
 */
if ((host->use_dma == TRANS_MODE_EDMAC) &&
data && (data->flags & MMC_DATA_READ)) /* Invalidate cache after read */
dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
data->sg,
data->sg_len,
DMA_FROM_DEVICE);
host->dma_ops->cleanup(host);
/* * If the card was removed, data will be NULL. No point in trying to * send the stop command or waiting for NBUSY in this case.
*/ if (data) {
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
queue_work(system_bh_wq, &host->bh_work);
}
}
/*
 * NOTE(review): dw_mci_idmac_init() sets up the IDMAC descriptor ring (32-bit
 * or 64-bit descriptor layout) and programs the descriptor base address.
 * The text below is damaged by extraction: "staticint" has lost a space, the
 * 64-bit forward-link loop appears truncated, and the function's tail is
 * spliced with descriptor OWN-bit wait fragments from a different function.
 * Code is kept byte-identical; restore from upstream dw_mmc.c.
 */
staticint dw_mci_idmac_init(struct dw_mci *host)
{ int i;
if (host->dma_64bit_address == 1) { struct idmac_desc_64addr *p; /* Number of descriptors in the ring buffer */
host->ring_size =
DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
/* Forward link the descriptor list */ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
i++, p++) {
p->des6 = (host->sg_dma +
(sizeof(struct idmac_desc_64addr) *
(i + 1))) & 0xffffffff;
/*
 * NOTE(review): the forward-link loop's closing brace (and likely a des7
 * store inside the loop) is missing here — extraction damage; the lines
 * below set the last descriptor as end-of-ring. Compare upstream.
 */
/* Set the last descriptor as the end-of-ring descriptor */
p->des6 = host->sg_dma & 0xffffffff;
p->des7 = (u64)host->sg_dma >> 32;
p->des0 = IDMAC_DES0_ER;
} else { struct idmac_desc *p; /* Number of descriptors in the ring buffer */
host->ring_size =
DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
/* Forward link the descriptor list */ for (i = 0, p = host->sg_cpu;
i < host->ring_size - 1;
i++, p++) {
p->des3 = cpu_to_le32(host->sg_dma +
(sizeof(struct idmac_desc) * (i + 1)));
p->des0 = 0;
p->des1 = 0;
}
/* Set the last descriptor as the end-of-ring descriptor */
p->des3 = cpu_to_le32(host->sg_dma);
p->des0 = cpu_to_le32(IDMAC_DES0_ER);
}
dw_mci_idmac_reset(host);
if (host->dma_64bit_address == 1) { /* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDSTS64, IDMAC_INT_CLR);
mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
/* Set the descriptor base address */
mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
} else { /* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDSTS, IDMAC_INT_CLR);
mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
/* Set the descriptor base address */
mci_writel(host, DBADDR, host->sg_dma);
}
/*
 * NOTE(review): the lines below are fragments from a different function
 * (descriptor-preparation code that polls the IDMAC OWN bit), spliced in
 * here by the extraction; dw_mci_idmac_init()'s "return 0; }" is missing.
 */
/* * Wait for the former clear OWN bit operation * of IDMAC to make sure that this descriptor * isn't still owned by IDMAC as IDMAC's write * ops and CPU's read ops are asynchronous.
*/ if (readl_poll_timeout_atomic(&desc->des0, val,
!(val & IDMAC_DES0_OWN),
10, 100 * USEC_PER_MSEC)) goto err_own_bit;
/* * Set the OWN bit and disable interrupts * for this descriptor
*/
desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
IDMAC_DES0_CH;
/* * Wait for the former clear OWN bit operation * of IDMAC to make sure that this descriptor * isn't still owned by IDMAC as IDMAC's write * ops and CPU's read ops are asynchronous.
*/ if (readl_poll_timeout_atomic(&desc->des0, val,
IDMAC_OWN_CLR64(val),
10,
100 * USEC_PER_MSEC)) goto err_own_bit;
/* * Set the OWN bit and disable interrupts * for this descriptor
*/
desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
IDMAC_DES0_DIC |
IDMAC_DES0_CH);
/*
 * NOTE(review): fragments of dw_mci_pre_dma_transfer(), the CDTHRCTL write
 * threshold setup, DMA submit / FIFO watermark code, dw_mci_setup_bus(), and
 * the CMD11 timer arming from dw_mci_start_request(). Several function bodies
 * are truncated and spliced together; code kept byte-identical.
 */
staticint dw_mci_pre_dma_transfer(struct dw_mci *host, struct mmc_data *data, int cookie)
{ struct scatterlist *sg; unsignedint i, sg_len;
if (data->host_cookie == COOKIE_PRE_MAPPED) return data->sg_len;
/* * We don't do DMA on "complex" transfers, i.e. with * non-word-aligned buffers or lengths. Also, we don't bother * with all the DMA setup overhead for short transfers.
*/ if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) return -EINVAL;
/* * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is * in the FIFO region, so we really shouldn't access it).
*/ if (host->verid < DW_MMC_240A ||
(host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE)) return;
/* * Card write Threshold is introduced since 2.80a * It's used when HS400 mode is enabled.
*/ if (data->flags & MMC_DATA_WRITE &&
host->timing != MMC_TIMING_MMC_HS400) goto disable;
/* * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz * Currently just choose blksz.
*/
thld_size = blksz;
mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); return;
/* * Decide the MSIZE and RX/TX Watermark. * If current block size is same with previous size, * no need to update fifoth.
*/ if (host->prev_blksz != data->blksz)
dw_mci_adjust_fifoth(host, data);
/* DMA submit: on failure fall back to PIO for this transfer. */
if (host->dma_ops->start(host, sg_len)) {
host->dma_ops->stop(host); /* We can't do DMA, try PIO for this one */
dev_dbg(host->dev, "%s: fall back to PIO mode for current transfer\n",
__func__); return -ENODEV;
}
/* * Use the initial fifoth_val for PIO mode. If wm_algined * is set, we set watermark same as data size. * If next issued data may be transferred by DMA mode, * prev_blksz should be invalidated.
*/ if (host->wm_aligned)
dw_mci_adjust_fifoth(host, data); else
mci_writel(host, FIFOTH, host->fifoth_val);
host->prev_blksz = 0;
} else { /* * Keep the current block size. * It will be used to decide whether to update * fifoth register next time.
*/
host->prev_blksz = data->blksz;
}
}
/*
 * dw_mci_setup_bus() fragment: compute the clock divider, program CLKENA,
 * and record the actual clock. Note "elseif" below is an extraction artifact.
 */
/* We must continue to set bit 28 in CMD until the change is complete */ if (host->state == STATE_WAITING_CMD11_DONE)
sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
slot->mmc->actual_clock = 0;
if (!clock) {
mci_writel(host, CLKENA, 0);
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
} elseif (clock != host->current_speed || force_clkinit) {
div = host->bus_hz / clock; if (host->bus_hz % clock && host->bus_hz > clock) /* * move the + 1 after the divide to prevent * over-clocking the card.
*/
div += 1;
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
if ((clock != slot->__clk_old &&
!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
force_clkinit) { /* Silent the verbose log if calling from PM context */ if (!force_clkinit)
dev_info(&slot->mmc->class_dev, "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
slot->id, host->bus_hz, clock,
div ? ((host->bus_hz / div) >> 1) :
host->bus_hz, div);
/* * If card is polling, display the message only * one time at boot time.
*/ if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
slot->mmc->f_min == clock)
set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
}
/* keep the last clock value that was requested from core */
slot->__clk_old = clock;
slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
host->bus_hz;
}
host->current_speed = clock;
/* Set the current slot bus width */
mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
/*
 * dw_mci_start_request() fragment: send the init clock on first command and
 * arm the CMD11 timeout timer for voltage-switch commands.
 */
/* this is the first command, send the initialization clock */ if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
cmdflags |= SDMMC_CMD_INIT;
if (cmd->opcode == SD_SWITCH_VOLTAGE) { unsignedlong irqflags;
/* * Databook says to fail after 2ms w/ no response, but evidence * shows that sometimes the cmd11 interrupt takes over 130ms. * We'll set to 500ms, plus an extra jiffy just in case jiffies * is just about to roll over. * * We do this whole thing under spinlock and only if the * command hasn't already completed (indicating the irq * already ran so we don't want the timeout).
*/
spin_lock_irqsave(&host->irq_lock, irqflags); if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
mod_timer(&host->cmd11_timer,
jiffies + msecs_to_jiffies(500) + 1);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
/*
 * NOTE(review): fragments of dw_mci_queue_request() (tail missing),
 * the card-detect check from dw_mci_request(), dw_mci_switch_voltage(),
 * dw_mci_get_ro() (body missing), SDIO-IRQ runtime-PM and HS400-tuning
 * pieces, dw_mci_reset() (tail missing), fault-injection code, and the
 * data-error classification from data-transfer completion. Code kept
 * byte-identical; restore from upstream dw_mmc.c.
 */
/* must be called with host->lock held */ staticvoid dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, struct mmc_request *mrq)
{
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
slot->mrq = mrq;
if (host->state == STATE_WAITING_CMD11_DONE) {
dev_warn(&slot->mmc->class_dev, "Voltage change didn't complete\n"); /* * this case isn't expected to happen, so we can * either crash here or just try to continue on * in the closest possible state
*/
host->state = STATE_IDLE;
}
/* * The check for card presence and queueing of the request must be * atomic, otherwise the card could be removed in between and the * request wouldn't fail until another card was inserted.
*/
if (!dw_mci_get_cd(mmc)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq); return;
}
/*
 * dw_mci_switch_voltage() fragment: prefer the platform hook, otherwise
 * toggle the UHS_REG 1.8V bit and the vqmmc regulator.
 */
if (drv_data && drv_data->switch_voltage) return drv_data->switch_voltage(mmc, ios);
/* * Program the voltage. Note that some instances of dw_mmc may use * the UHS_REG for this. For other instances (like exynos) the UHS_REG * does no harm but you need to set the regulator directly. Try both.
*/
uhs = mci_readl(host, UHS_REG); if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
uhs &= ~v18; else
uhs |= v18;
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = mmc_regulator_set_vqmmc(mmc, ios); if (ret < 0) {
dev_dbg(&mmc->class_dev, "Regulator set error %d - %s V\n",
ret, uhs & v18 ? "1.8" : "3.3"); return ret;
}
}
mci_writel(host, UHS_REG, uhs);
return 0;
}
/*
 * NOTE(review): dw_mci_get_ro()'s body is missing here; the lines after its
 * signature belong to the SDIO-IRQ runtime-PM helper and HS400 tuning prep.
 */
staticint dw_mci_get_ro(struct mmc_host *mmc)
{ int read_only; struct dw_mci_slot *slot = mmc_priv(mmc); int gpio_ro = mmc_gpio_get_ro(mmc);
/* * Low power mode will stop the card clock when idle. According to the * description of the CLKENA register we should disable low power mode * for SDIO cards if we need SDIO interrupts to work.
*/
/* Avoid runtime suspending the device when SDIO IRQ is enabled */ if (enb)
pm_runtime_get_noresume(host->dev); else
pm_runtime_put_noidle(host->dev);
}
if (drv_data && drv_data->prepare_hs400_tuning) return drv_data->prepare_hs400_tuning(host, ios);
return 0;
}
/*
 * NOTE(review): dw_mci_reset() below is missing its "return ret; }" tail;
 * the fault-injection fragment follows immediately after ciu_out.
 */
staticbool dw_mci_reset(struct dw_mci *host)
{
u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; bool ret = false;
u32 status = 0;
/* * Resetting generates a block interrupt, hence setting * the scatter-gather pointer to NULL.
*/ if (host->sg) {
sg_miter_stop(&host->sg_miter);
host->sg = NULL;
}
if (host->use_dma)
flags |= SDMMC_CTRL_DMA_RESET;
if (dw_mci_ctrl_reset(host, flags)) { /* * In all cases we clear the RAWINTS * register to clear any interrupts.
*/
mci_writel(host, RINTSTS, 0xFFFFFFFF);
if (!host->use_dma) {
ret = true; goto ciu_out;
}
/* Wait for dma_req to be cleared */ if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
status,
!(status & SDMMC_STATUS_DMA_REQ),
1, 500 * USEC_PER_MSEC)) {
dev_err(host->dev, "%s: Timeout waiting for dma_req to be cleared\n",
__func__); goto ciu_out;
}
/* when using DMA next we reset the fifo again */ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) goto ciu_out;
} else { /* if the controller reset bit did clear, then set clock regs */ if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
dev_err(host->dev, "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
__func__); goto ciu_out;
}
}
if (host->use_dma == TRANS_MODE_IDMAC) /* It is also required that we reinit idmac */
dw_mci_idmac_init(host);
ret = true;
ciu_out: /* After a CTRL reset we need to have CIU set clock registers */
mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
/* * Only inject an error if we haven't already got an error or data over * interrupt.
*/ if (!host->data_status) {
host->data_status = SDMMC_INT_DCRC;
set_bit(EVENT_DATA_ERROR, &host->pending_events);
queue_work(system_bh_wq, &host->bh_work);
}
if (!should_fail(&host->fail_data_crc, 1)) return;
/* * Try to inject the error at random points during the data transfer.
*/
hrtimer_start(&host->fault_timer,
ms_to_ktime(get_random_u32_below(25)),
HRTIMER_MODE_REL);
}
/*
 * Data-completion fragment: map interrupt status bits to data->error.
 * "elseif" is an extraction artifact.
 */
if (status & DW_MCI_DATA_ERROR_FLAGS) { if (status & SDMMC_INT_DRTO) {
data->error = -ETIMEDOUT;
} elseif (status & SDMMC_INT_DCRC) {
data->error = -EILSEQ;
} elseif (status & SDMMC_INT_EBE) { if (host->dir_status ==
DW_MCI_SEND_STATUS) { /* * No data CRC status was returned. * The number of bytes transferred * will be exaggerated in PIO mode.
*/
data->bytes_xfered = 0;
data->error = -ETIMEDOUT;
} elseif (host->dir_status ==
DW_MCI_RECV_STATUS) {
data->error = -EILSEQ;
}
} else { /* SDMMC_INT_SBE is included */
data->error = -EILSEQ;
}
dev_dbg(host->dev, "data error, status 0x%08x\n", status);
/* * After an error, there may be data lingering * in the FIFO
*/
dw_mci_reset(host);
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
}
staticbool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
{ if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) returnfalse;
/* * Really be certain that the timer has stopped. This is a bit of * paranoia and could only really happen if we had really bad * interrupt latency and the interrupt routine and timeout were * running concurrently so that the timer_delete() in the interrupt * handler couldn't run.
*/
WARN_ON(timer_delete_sync(&host->cto_timer));
clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
returntrue;
}
/*
 * NOTE(review): dw_mci_clear_pending_data_complete() below is missing its
 * "return true; }" tail; the remaining lines are fragments of the request
 * state machine (dw_mci_tasklet_func): command-error handling, the
 * STATE_SENDING_DATA / STATE_DATA_BUSY / STATE_SENDING_STOP cases.
 * Code kept byte-identical.
 */
staticbool dw_mci_clear_pending_data_complete(struct dw_mci *host)
{ if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) returnfalse;
/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
WARN_ON(timer_delete_sync(&host->dto_timer));
clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
/*
 * State-machine fragment: decide how to recover after a command error on a
 * data command (either let the transfer finish or send stop/abort).
 */
if (cmd->data && err) { /* * During UHS tuning sequence, sending the stop * command after the response CRC error would * throw the system into a confused state * causing all future tuning phases to report * failure. * * In such case controller will move into a data * transfer state after a response error or * response CRC error. Let's let that finish * before trying to send a stop, so we'll go to * STATE_SENDING_DATA. * * Although letting the data transfer take place * will waste a bit of time (we already know * the command was bad), it can't cause any * errors since it's possible it would have * taken place anyway if this bh work got * delayed. Allowing the transfer to take place * avoids races and keeps things simple.
*/ if (err != -ETIMEDOUT &&
host->dir_status == DW_MCI_RECV_STATUS) {
state = STATE_SENDING_DATA; continue;
}
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_SENDING_STOP; break;
}
if (!cmd->data || err) {
dw_mci_request_end(host, mrq); goto unlock;
}
prev_state = state = STATE_SENDING_DATA;
fallthrough;
case STATE_SENDING_DATA: /* * We could get a data error and never a transfer * complete so we'd better check for it here. * * Note that we don't really care if we also got a * transfer complete; stopping the DMA and sending an * abort won't hurt.
*/ if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) { if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_DATA_ERROR; break;
}
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events)) { /* * If all data-related interrupts don't come * within the given time in reading data state.
*/ if (host->dir_status == DW_MCI_RECV_STATUS)
dw_mci_set_drto(host); break;
}
/* * Handle an EVENT_DATA_ERROR that might have shown up * before the transfer completed. This might not have * been caught by the check above because the interrupt * could have gone off between the previous check and * the check for transfer complete. * * Technically this ought not be needed assuming we * get a DATA_COMPLETE eventually (we'll notice the * error and end the request), but it shouldn't hurt. * * This has the advantage of sending the stop command.
*/ if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) { if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_DATA_ERROR; break;
}
prev_state = state = STATE_DATA_BUSY;
fallthrough;
case STATE_DATA_BUSY: if (!dw_mci_clear_pending_data_complete(host)) { /* * If data error interrupt comes but data over * interrupt doesn't come within the given time. * in reading data state.
*/ if (host->dir_status == DW_MCI_RECV_STATUS)
dw_mci_set_drto(host); break;
}
if (!err) { if (!data->stop || mrq->sbc) { if (mrq->sbc && data->stop)
data->stop->error = 0;
dw_mci_request_end(host, mrq); goto unlock;
}
/* stop command for open-ended transfer*/ if (data->stop)
send_stop_abort(host, data);
} else { /* * If we don't have a command complete now we'll * never get one since we just reset everything; * better end the request. * * If we do have a command complete we'll fall * through to the SENDING_STOP command and * everything will be peachy keen.
*/ if (!test_bit(EVENT_CMD_COMPLETE,
&host->pending_events)) {
host->cmd = NULL;
dw_mci_request_end(host, mrq); goto unlock;
}
}
/* * If err has non-zero, * stop-abort command has been already issued.
*/
prev_state = state = STATE_SENDING_STOP;
fallthrough;
case STATE_SENDING_STOP: if (!dw_mci_clear_pending_cmd_complete(host)) break;
/* CMD error in data command */ if (mrq->cmd->error && mrq->data)
dw_mci_reset(host);
/*
 * Stash the final 'cnt' trailing bytes of a push into host->part_buf and
 * record how many are buffered. Only used on the push (write-to-FIFO) path,
 * when fewer bytes remain than one FIFO word.
 *
 * Fixes the extraction-mangled "staticvoid" token that would not compile.
 */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}
/*
 * Append up to 'cnt' bytes from 'buf' to host->part_buf, without exceeding
 * one FIFO word (1 << host->data_shift bytes). Only used on the push path.
 *
 * Returns the number of bytes actually consumed from 'buf'.
 *
 * Fixes the extraction-mangled "staticint" token that would not compile.
 */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}
/*
 * Copy up to 'cnt' buffered bytes out of host->part_buf into 'buf',
 * advancing part_buf_start and shrinking part_buf_count. Only used on the
 * pull (read-from-FIFO) path.
 *
 * Returns the number of bytes actually copied (possibly 0).
 *
 * Fixes the extraction-mangled "staticint" token that would not compile.
 */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}
/*
 * Take the first 'cnt' bytes out of host->part_buf (assumed to have just
 * been filled with one full FIFO word) and leave the remainder buffered:
 * part_buf_start points past the consumed bytes, part_buf_count holds the
 * leftover count. Only used on the pull path.
 *
 * Fixes the extraction-mangled "staticvoid" token that would not compile.
 */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
staticvoid dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{ struct mmc_data *data = host->data; int init_cnt = cnt;
/* try and push anything in the part_buf */ if (unlikely(host->part_buf_count)) { int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len; if (host->part_buf_count == 2) {
mci_fifo_writew(host->fifo_reg, host->part_buf16);
host->part_buf_count = 0;
}
} #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsignedlong)buf & 0x1)) { while (cnt >= 2) {
u16 aligned_buf[64]; int len = min(cnt & -2, (int)sizeof(aligned_buf)); int items = len >> 1; int i; /* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
buf += len;
cnt -= len; /* push data from aligned buffer into fifo */ for (i = 0; i < items; ++i)
mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
}
} else #endif
{
u16 *pdata = buf;
for (; cnt >= 2; cnt -= 2)
mci_fifo_writew(host->fifo_reg, *pdata++);
buf = pdata;
} /* put anything remaining in the part_buf */ if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt); /* Push data if we have reached the expected data length */ if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_fifo_writew(host->fifo_reg, host->part_buf16);
}
}
/*
 * NOTE(review): dw_mci_pull_data16() is truncated here — the aligned-path
 * loop is present but the word-wise pull loop and the function's closing
 * braces are missing (the next function begins immediately after). Code
 * kept byte-identical; restore from upstream dw_mmc.c.
 */
staticvoid dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsignedlong)buf & 0x1)) { while (cnt >= 2) { /* pull data from fifo into aligned buffer */
u16 aligned_buf[64]; int len = min(cnt & -2, (int)sizeof(aligned_buf)); int items = len >> 1; int i;
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readw(host->fifo_reg); /* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
cnt -= len;
}
} else #endif
{
u16 *pdata = buf;
staticvoid dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{ struct mmc_data *data = host->data; int init_cnt = cnt;
/* try and push anything in the part_buf */ if (unlikely(host->part_buf_count)) { int len = dw_mci_push_part_bytes(host, buf, cnt);
buf += len;
cnt -= len; if (host->part_buf_count == 4) {
mci_fifo_writel(host->fifo_reg, host->part_buf32);
host->part_buf_count = 0;
}
} #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsignedlong)buf & 0x3)) { while (cnt >= 4) {
u32 aligned_buf[32]; int len = min(cnt & -4, (int)sizeof(aligned_buf)); int items = len >> 2; int i; /* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
buf += len;
cnt -= len; /* push data from aligned buffer into fifo */ for (i = 0; i < items; ++i)
mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
}
} else #endif
{
u32 *pdata = buf;
for (; cnt >= 4; cnt -= 4)
mci_fifo_writel(host->fifo_reg, *pdata++);
buf = pdata;
} /* put anything remaining in the part_buf */ if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt); /* Push data if we have reached the expected data length */ if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_fifo_writel(host->fifo_reg, host->part_buf32);
}
}
/*
 * NOTE(review): dw_mci_pull_data32() is truncated here — the aligned-path
 * loop is present but the word-wise pull loop and the closing braces are
 * missing; the file then runs into stray non-code text. Code kept
 * byte-identical; restore from upstream dw_mmc.c.
 */
staticvoid dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS if (unlikely((unsignedlong)buf & 0x3)) { while (cnt >= 4) { /* pull data from fifo into aligned buffer */
u32 aligned_buf[32]; int len = min(cnt & -4, (int)sizeof(aligned_buf)); int items = len >> 2; int i;
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_fifo_readl(host->fifo_reg); /* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
buf += len;
cnt -= len;
}
} else #endif
{
u32 *pdata = buf;
/*
 * NOTE(review): stray non-code text (a German website disclaimer) was
 * accidentally appended to this source file. It is not C code and breaks
 * compilation; it should simply be removed. Preserved verbatim as a
 * comment pending removal (English: "The information on this website was
 * carefully compiled to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Remark: the syntax colouring and the measurement are still
 * experimental."):
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */