/*
 * From V5, register spaces have changed. Wrap this info in a structure
 * and choose the data_structure based on version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;		/* true on v5+ where the MCI register space is gone */
	bool restore_dll_config;	/* DLL config must be restored on runtime resume */
	const struct sdhci_msm_variant_ops *var_ops;	/* variant register accessors */
	const struct sdhci_msm_offset *offset;		/* per-variant register offsets */
};
/* * APIs to read/write to vendor specific registers which were there in the * core_mem region before MCI was removed.
*/ static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
u32 offset)
{ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
/*
 * Return the multiplier to apply to the requested bus clock for the
 * current bus timing mode.
 *
 * The SDHC requires the internal clock frequency to be double the
 * actual clock that will be set for DDR mode. The controller uses the
 * faster clock (100/400 MHz) for some of its parts and sends the
 * actual required clock (50/200 MHz) to the card.
 */
static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	/* HS400 tuning runs at HS200 timing but still needs the doubled clock */
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		return 2;
	return 1;
}
/*
 * NOTE(review): truncated fragment — the enclosing function's header
 * (presumably msm_set_clock_rate_for_bus_mode()) was lost during
 * extraction. Code lines kept verbatim; only comments added.
 */
mult = msm_get_clock_mult_for_bus_mode(host);
/* DDR/HS400 modes require the internal clock at double the bus rate */
desired_rate = clock * mult;
rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate); if (rc) {
pr_err("%s: Failed to set clock at rate %u at timing %d\n",
mmc_hostname(host->mmc), desired_rate, curr_ios.timing); return;
}
/* * Qualcomm clock drivers by default round clock _up_ if they can't * make the requested rate. This is not good for SD. Yell if we * encounter it.
 */
achieved_rate = clk_get_rate(core_clk); if (achieved_rate > desired_rate)
pr_warn("%s: Card appears overclocked; req %u Hz, actual %lu Hz\n",
mmc_hostname(host->mmc), desired_rate, achieved_rate);
/* report the card-visible clock: achieved rate without the DDR doubling */
host->mmc->actual_clock = achieved_rate / mult;
/* Stash the rate we requested to use in sdhci_msm_runtime_resume() */
msm_host->clk_rate = desired_rate;
pr_debug("%s: Setting clock at rate %lu at timing %d\n",
mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);
}
/*
 * NOTE(review): extraction merged two truncated fragments here — a
 * CK_OUT_EN polling loop (looks like msm_dll_poll_ck_out_en()) and the
 * error/exit labels of a DLL-phase-config function. The loop body below
 * is visibly missing its closing braces and the re-read of the register;
 * those lines were lost in extraction. Code kept verbatim.
 */
/* Poll for CK_OUT_EN bit. max. poll time = 50us */
ck_out_en = !!(readl_relaxed(host->ioaddr +
msm_offset->core_dll_config) & CORE_CK_OUT_EN);
while (ck_out_en != poll) { if (--wait_cnt == 0) {
dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
mmc_hostname(mmc), poll); return -ETIMEDOUT;
}
udelay(1);
/* error path of the (truncated) phase-config function */
err_out:
dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
mmc_hostname(mmc), phase);
out:
spin_unlock_irqrestore(&host->lock, flags); return rc;
}
/* * Find out the greatest range of consecuitive selected * DLL clock output phases that can be used as sampling * setting for SD3.0 UHS-I card read operation (in SDR104 * timing mode) or for eMMC4.5 card read operation (in * HS400/HS200 timing mode). * Select the 3/4 of the range and configure the DLL with the * selected DLL clock output phase.
*/
if ((cnt + 1) == total_phases) { continue; /* check if next phase in phase_table is consecutive or not */
} elseif ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
row_index++;
col_index = 0;
}
}
if (row_index >= MAX_PHASES) return -EINVAL;
/* Check if phase-0 is present in first valid window? */ if (!ranges[0][0]) {
phase_0_found = true;
phase_0_raw_index = 0; /* Check if cycle exist between 2 valid windows */ for (cnt = 1; cnt <= row_index; cnt++) { if (phases_per_row[cnt]) { for (i = 0; i < phases_per_row[cnt]; i++) { if (ranges[cnt][i] == 15) {
phase_15_found = true;
phase_15_raw_index = cnt; break;
}
}
}
}
}
/* If 2 valid windows form cycle then merge them as single window */ if (phase_0_found && phase_15_found) { /* number of phases in raw where phase 0 is present */
u8 phases_0 = phases_per_row[phase_0_raw_index]; /* number of phases in raw where phase 15 is present */
u8 phases_15 = phases_per_row[phase_15_raw_index];
if (phases_0 + phases_15 >= MAX_PHASES) /* * If there are more than 1 phase windows then total * number of phases in both the windows should not be * more than or equal to MAX_PHASES.
*/ return -EINVAL;
/* Merge 2 cyclic windows */
i = phases_15; for (cnt = 0; cnt < phases_0; cnt++) {
ranges[phase_15_raw_index][i] =
ranges[phase_0_raw_index][cnt]; if (++i >= MAX_PHASES) break;
}
if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
xo_clk = clk_get_rate(msm_host->xo_clk);
spin_lock_irqsave(&host->lock, flags);
/* * Make sure that clock is always enabled when DLL * tuning is in progress. Keeping PWRSAVE ON may * turn off the clock.
*/
config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
config &= ~CORE_CLK_PWRSAVE;
writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
if (msm_host->dll_config)
writel_relaxed(msm_host->dll_config,
host->ioaddr + msm_offset->core_dll_config);
/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */ while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
CORE_DLL_LOCK)) { /* max. wait for 50us sec for LOCK bit to be set */ if (--wait_cnt == 0) {
dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
mmc_hostname(mmc));
spin_unlock_irqrestore(&host->lock, flags); return -ETIMEDOUT;
}
udelay(1);
}
/* * Disable HC_SELECT_IN to be able to use the UHS mode select * configuration from Host Control2 register for all other * modes. * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field * in VENDOR_SPEC_FUNC
*/
config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
config &= ~CORE_HC_SELECT_IN_EN;
config &= ~CORE_HC_SELECT_IN_MASK;
writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
/* * Make sure above writes impacting free running MCLK are completed * before changing the clk_rate at GCC.
*/
wmb();
}
/*
 * NOTE(review): truncated fragment — the enclosing function's header
 * was lost during extraction; this appears to be the HS400 branch of
 * sdhci_msm_hc_select_mode(). Code lines kept verbatim.
 */
writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec); /* * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC * register
 */ if ((msm_host->tuning_done || ios.enhanced_strobe) &&
!msm_host->calibration_done) {
config = readl_relaxed(host->ioaddr +
msm_offset->core_vendor_spec);
config |= CORE_HC_SELECT_IN_HS400;
config |= CORE_HC_SELECT_IN_EN;
writel_relaxed(config, host->ioaddr +
msm_offset->core_vendor_spec);
} if (!msm_host->clk_rate && !msm_host->use_cdclp533) { /* * Poll on DLL_LOCK or DDR_DLL_LOCK bits in * core_dll_status to be set. This should get set * within 15 us at 200 MHz.
 */
rc = readl_relaxed_poll_timeout(host->ioaddr +
msm_offset->core_dll_status,
dll_lock,
(dll_lock &
(CORE_DLL_LOCK |
CORE_DDR_DLL_LOCK)), 10,
1000); if (rc == -ETIMEDOUT)
pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
mmc_hostname(host->mmc), dll_lock);
} /* * Make sure above writes impacting free running MCLK are completed * before changing the clk_rate at GCC.
 */
wmb();
}
/* * sdhci_msm_hc_select_mode :- In general all timing modes are * controlled via UHS mode select in Host Control2 register. * eMMC specific HS200/HS400 doesn't have their respective modes * defined here, hence we use these values. * * HS200 - SDR104 (Since they both are equivalent in functionality) * HS400 - This involves multiple configurations * Initially SDR104 - when tuning is required as HS200 * Then when switching to DDR @ 400MHz (HS400) we use * the vendor specific HC_SELECT_IN to control the mode. * * In addition to controlling the modes we also need to select the * correct input clock for DLL depending on the mode. * * HS400 - divided clock (free running MCLK/2) * All other modes - default (free running MCLK)
*/ staticvoid sdhci_msm_hc_select_mode(struct sdhci_host *host)
{ struct mmc_ios ios = host->mmc->ios;
/* * Retuning in HS400 (DDR mode) will fail, just reset the * tuning block and restore the saved tuning phase.
*/
ret = msm_init_cm_dll(host); if (ret) goto out;
/* Set the selected phase in delay line hw block */
ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase); if (ret) goto out;
/* * Currently the core_ddr_config register defaults to desired * configuration on reset. Currently reprogramming the power on * reset (POR) value in case it might have been modified by * bootloaders. In the future, if this changes, then the desired * values will need to be programmed appropriately.
*/ if (msm_host->updated_ddr_cfg)
ddr_cfg_offset = msm_offset->core_ddr_config; else
ddr_cfg_offset = msm_offset->core_ddr_config_old;
writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);
if (ret == -ETIMEDOUT) {
pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
mmc_hostname(host->mmc), __func__); goto out;
}
/* * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3. * When MCLK is gated OFF, it is not gated for less than 0.5us * and MCLK must be switched on for at-least 1us before DATA * starts coming. Controllers with 14lpp and later tech DLL cannot * guarantee above requirement. So PWRSAVE_DLL should not be * turned on for host controllers using this DLL.
*/ if (!msm_host->use_14lpp_dll_reset) {
config = readl_relaxed(host->ioaddr +
msm_offset->core_vendor_spec3);
config |= CORE_PWRSAVE_DLL;
writel_relaxed(config, host->ioaddr +
msm_offset->core_vendor_spec3);
}
/* * Drain writebuffer to ensure above DLL calibration * and PWRSAVE DLL is enabled.
*/
wmb();
out:
pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
__func__, ret); return ret;
}
/* * Retuning in HS400 (DDR mode) will fail, just reset the * tuning block and restore the saved tuning phase.
*/
ret = msm_init_cm_dll(host); if (ret) goto out;
if (!mmc->ios.enhanced_strobe) { /* Set the selected phase in delay line hw block */
ret = msm_config_cm_dll_phase(host,
msm_host->saved_tuning_phase); if (ret) goto out;
config = readl_relaxed(host->ioaddr +
msm_offset->core_dll_config);
config |= CORE_CMD_DAT_TRACK_SEL;
writel_relaxed(config, host->ioaddr +
msm_offset->core_dll_config);
}
if (msm_host->use_cdclp533)
ret = sdhci_msm_cdclp533_calibration(host); else
ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
__func__, ret); return ret;
}
if (ios->timing == MMC_TIMING_UHS_SDR50 &&
host->flags & SDHCI_SDR50_NEEDS_TUNING) returntrue;
/* * Tuning is required for SDR104, HS200 and HS400 cards and * if clock frequency is greater than 100MHz in these modes.
*/ if (host->clock <= CORE_FREQ_100MHZ ||
!(ios->timing == MMC_TIMING_MMC_HS400 ||
ios->timing == MMC_TIMING_MMC_HS200 ||
ios->timing == MMC_TIMING_UHS_SDR104) ||
ios->enhanced_strobe) returnfalse;
/* * For HS400 tuning in HS200 timing requires: * - select MCLK/2 in VENDOR_SPEC * - program MCLK to 400MHz (or nearest supported) in GCC
*/ if (host->flags & SDHCI_HS400_TUNING) {
sdhci_msm_hc_select_mode(host);
msm_set_clock_rate_for_bus_mode(host, ios.clock);
host->flags &= ~SDHCI_HS400_TUNING;
}
retry: /* First of all reset the tuning block */
rc = msm_init_cm_dll(host); if (rc) return rc;
phase = 0; do { /* Set the phase in delay line hw block */
rc = msm_config_cm_dll_phase(host, phase); if (rc) return rc;
rc = mmc_send_tuning(mmc, opcode, NULL); if (!rc) { /* Tuning is successful at this tuning point */
tuned_phases[tuned_phase_cnt++] = phase;
dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
mmc_hostname(mmc), phase);
}
} while (++phase < ARRAY_SIZE(tuned_phases));
if (tuned_phase_cnt) { if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) { /* * All phases valid is _almost_ as bad as no phases * valid. Probably all phases are not really reliable * but we didn't detect where the unreliable place is. * That means we'll essentially be guessing and hoping * we get a good phase. Better to try a few times.
*/
dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
mmc_hostname(mmc)); if (--tuning_seq_cnt) {
tuned_phase_cnt = 0; goto retry;
}
}
/* * Finally set the selected phase in delay * line hw block.
*/
rc = msm_config_cm_dll_phase(host, phase); if (rc) return rc;
msm_host->saved_tuning_phase = phase;
dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
mmc_hostname(mmc), phase);
} else { if (--tuning_seq_cnt) goto retry; /* Tuning failed */
dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
mmc_hostname(mmc));
rc = -EIO;
}
if (!rc)
msm_host->tuning_done = true; return rc;
}
/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	/* Calibrate once, only when the DLL is actually in use (>100 MHz) */
	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); /* Select Bus Speed Mode for host */
ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; switch (uhs) { case MMC_TIMING_UHS_SDR12:
ctrl_2 |= SDHCI_CTRL_UHS_SDR12; break; case MMC_TIMING_UHS_SDR25:
ctrl_2 |= SDHCI_CTRL_UHS_SDR25; break; case MMC_TIMING_UHS_SDR50:
ctrl_2 |= SDHCI_CTRL_UHS_SDR50; break; case MMC_TIMING_MMC_HS400: case MMC_TIMING_MMC_HS200: case MMC_TIMING_UHS_SDR104:
ctrl_2 |= SDHCI_CTRL_UHS_SDR104; break; case MMC_TIMING_UHS_DDR50: case MMC_TIMING_MMC_DDR52:
ctrl_2 |= SDHCI_CTRL_UHS_DDR50; break;
}
/* * When clock frequency is less than 100MHz, the feedback clock must be * provided and DLL must not be used so that tuning can be skipped. To * provide feedback clock, the mode selection can be any value less * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
*/ if (host->clock <= CORE_FREQ_100MHZ) { if (uhs == MMC_TIMING_MMC_HS400 ||
uhs == MMC_TIMING_MMC_HS200 ||
uhs == MMC_TIMING_UHS_SDR104)
ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; /* * DLL is not required for clock <= 100MHz * Thus, make sure DLL it is disabled when not required
*/
config = readl_relaxed(host->ioaddr +
msm_offset->core_dll_config);
config |= CORE_DLL_RST;
writel_relaxed(config, host->ioaddr +
msm_offset->core_dll_config);
if (level) { /* Set the IO voltage regulator to default voltage level */ if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330; elseif (msm_host->caps_0 & CORE_1_8V_SUPPORT)
ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180;
if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
ret = mmc_regulator_set_vqmmc(mmc, &ios); if (ret < 0) {
dev_err(mmc_dev(mmc), "%s: vqmmc set volgate failed: %d\n",
mmc_hostname(mmc), ret); goto out;
}
}
ret = regulator_enable(mmc->supply.vqmmc);
} else {
ret = regulator_disable(mmc->supply.vqmmc);
}
if (IS_ERR(mmc->supply.vqmmc) ||
(mmc->ios.power_mode == MMC_POWER_UNDEFINED)) return 0; /* * For eMMC don't turn off Vqmmc, Instead just configure it in LPM * and HPM modes by setting the corresponding load. * * Till eMMC is initialized (i.e. always_on == 0), just turn on/off * Vqmmc. Vqmmc gets turned off only if init fails and mmc_power_off * gets invoked. Once eMMC is initialized (i.e. always_on == 1), * Vqmmc should remain ON, So just set the load instead of turning it * off/on.
*/
always_on = !mmc_card_is_removable(mmc) &&
mmc->card && mmc_card_mmc(mmc->card);
if (always_on)
ret = msm_config_vqmmc_mode(msm_host, mmc, level); else
ret = msm_toggle_vqmmc(msm_host, mmc, level);
/* * sdhci_msm_check_power_status API should be called when registers writes * which can toggle sdhci IO bus ON/OFF or change IO lines HIGH/LOW happens. * To what state the register writes will change the IO lines should be passed * as the argument req_type. This API will check whether the IO line's state * is already the expected state and will wait for power irq only if * power irq is expected to be triggered based on the current IO line state * and expected IO line state.
*/ staticvoid sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); struct mmc_host *mmc = host->mmc; bool done = false;
u32 val = SWITCHABLE_SIGNALING_VOLTAGE; conststruct sdhci_msm_offset *msm_offset =
msm_host->offset;
/* * The power interrupt will not be generated for signal voltage * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set. * Since sdhci-msm-v5, this bit has been removed and SW must consider * it as always set.
*/ if (!msm_host->mci_removed)
val = msm_host_readl(msm_host, host,
msm_offset->core_generics); if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
!(val & SWITCHABLE_SIGNALING_VOLTAGE)) { return;
}
/* * The IRQ for request type IO High/LOW will be generated when - * there is a state change in 1.8V enable bit (bit 3) of * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0 * which indicates 3.3V IO voltage. So, when MMC core layer tries * to set it to 3.3V before card detection happens, the * IRQ doesn't get triggered as there is no state change in this bit. * The driver already handles this case by changing the IO voltage * level to high as part of controller power up sequence. Hence, check * for host->pwr to handle a case where IO voltage high request is * issued even before controller power up.
*/ if ((req_type & REQ_IO_HIGH) && !host->pwr) {
pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
mmc_hostname(host->mmc), req_type); return;
} if ((req_type & msm_host->curr_pwr_state) ||
(req_type & msm_host->curr_io_level))
done = true; /* * This is needed here to handle cases where register writes will * not change the current bus state or io level of the controller. * In this case, no power irq will be triggerred and we should * not wait.
*/ if (!done) { if (!wait_event_timeout(msm_host->pwr_irq_wait,
msm_host->pwr_irq_flag,
msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
dev_warn(&msm_host->pdev->dev, "%s: pwr_irq for req: (%d) timed out\n",
mmc_hostname(host->mmc), req_type);
}
/* * There is a rare HW scenario where the first clear pulse could be * lost when actual reset and clear/read of status register is * happening at a time. Hence, retry for at least 10 times to make * sure status register is cleared. Otherwise, this will result in * a spurious power IRQ resulting in system instability.
*/ while (irq_status & msm_host_readl(msm_host, host,
msm_offset->core_pwrctl_status)) { if (retry == 0) {
pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
mmc_hostname(host->mmc), irq_status);
sdhci_msm_dump_pwr_ctrl_regs(host);
WARN_ON(1); break;
}
msm_host_writel(msm_host, irq_status, host,
msm_offset->core_pwrctl_clear);
retry--;
udelay(10);
}
/* * The driver has to acknowledge the interrupt, switch voltages and * report back if it succeded or not to this register. The voltage * switches are handled by the sdhci core, so just report success.
*/
msm_host_writel(msm_host, irq_ack, host,
msm_offset->core_pwrctl_ctl);
/* * If we don't have info regarding the voltage levels supported by * regulators, don't change the IO PAD PWR SWITCH.
*/ if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
u32 new_config; /* * We should unset IO PAD PWR switch only if the register write * can set IO lines high and the regulator also switches to 3 V. * Else, we should keep the IO PAD PWR switch set. * This is applicable to certain targets where eMMC vccq supply * is only 1.8V. In such targets, even during REQ_IO_HIGH, the * IO PAD PWR switch must be kept set to reflect actual * regulator voltage. This way, during initialization of * controllers with only 1.8V, we will set the IO PAD bit * without waiting for a REQ_IO_LOW.
*/
config = readl_relaxed(host->ioaddr +
msm_offset->core_vendor_spec);
new_config = config;
/* * __sdhci_msm_set_clock - sdhci_msm clock control. * * Description: * MSM controller does not use internal divider and * instead directly control the GCC clock as per * HW recommendation.
**/ staticvoid __sdhci_msm_set_clock(struct sdhci_host *host, unsignedint clock)
{
u16 clk;
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (clock == 0) return;
/* * MSM controller do not use clock divider. * Thus read SDHCI_CLOCK_CONTROL and only enable * clock with no divider value programmed.
*/
clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
sdhci_enable_clk(host, clk);
}
/* * Currently this driver only supports AES-256-XTS. All known versions * of ICE support it, but to be safe make sure it is really declared in * the crypto capability registers. The crypto capability registers * also give the supported data unit size(s).
*/ for (i = 0; i < caps.num_crypto_cap; i++) {
cap.reg_val = cpu_to_le32(cqhci_readl(cq_host,
CQHCI_CRYPTOCAP +
i * sizeof(__le32))); if (cap.algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS &&
cap.key_size == CQHCI_CRYPTO_KEY_SIZE_256)
profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
cap.sdus_mask * 512;
}
/* * Program a key into a QC ICE keyslot. QC ICE requires a QC-specific SCM call * for this; it doesn't support the standard way.
*/ staticint sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile, conststruct blk_crypto_key *key, unsignedint slot)
{ struct sdhci_msm_host *msm_host =
sdhci_msm_host_from_crypto_profile(profile);
/* * When CQE is halted, the legacy SDHCI path operates only * on 16-byte descriptors in 64bit mode.
*/ if (host->flags & SDHCI_USE_64_BIT_DMA)
host->desc_sz = 16;
spin_lock_irqsave(&host->lock, flags);
/* * During CQE command transfers, command complete bit gets latched. * So s/w should clear command complete interrupt status when CQE is * either halted or disabled. Otherwise unexpected SDCHI legacy * interrupt gets triggered when CQE is halted/disabled.
*/
ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
ctrl |= SDHCI_INT_RESPONSE;
sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
/* * When CQE is halted, SDHC operates only on 16byte ADMA descriptors. * So ensure ADMA table is allocated for 16byte descriptors.
*/ if (host->caps & SDHCI_CAN_64BIT)
host->alloc_desc_sz = 16;
ret = sdhci_setup_host(host); if (ret) return ret;
cq_host = cqhci_pltfm_init(pdev); if (IS_ERR(cq_host)) {
ret = PTR_ERR(cq_host);
dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); goto cleanup;
}
ret = sdhci_msm_ice_init(msm_host, cq_host); if (ret) goto cleanup;
ret = cqhci_init(cq_host, host->mmc, dma64); if (ret) {
dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
mmc_hostname(host->mmc), ret); goto cleanup;
}
/* Disable cqe reset due to cqe enable signal */
cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);
/* * SDHC expects 12byte ADMA descriptors till CQE is enabled. * So limit desc_sz to 12 so that the data commands that are sent * during card initialization (before CQE gets enabled) would * get executed without any issues.
*/ if (host->flags & SDHCI_USE_64_BIT_DMA)
host->desc_sz = 12;
ret = __sdhci_add_host(host); if (ret) goto cleanup;
/* * Platform specific register write functions. This is so that, if any * register write needs to be followed up by platform specific actions, * they can be added here. These functions can go to sleep when writes * to certain registers are done. * These functions are relying on sdhci_set_ios not using spinlock.
*/ staticint __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
u32 req_type = 0;
switch (reg) { case SDHCI_HOST_CONTROL2:
req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
REQ_IO_HIGH; break; case SDHCI_SOFTWARE_RESET: if (host->pwr && (val & SDHCI_RESET_ALL))
req_type = REQ_BUS_OFF; break; case SDHCI_POWER_CONTROL:
req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON; break; case SDHCI_TRANSFER_MODE:
msm_host->transfer_mode = val; break; case SDHCI_COMMAND: if (!msm_host->use_cdr) break; if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
!mmc_op_tuning(SDHCI_GET_CMD(val)))
sdhci_msm_set_cdr(host, true);
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.