/*
 * Details of UIC Errors — index matches the UIC error-class bit position
 * reported by the host controller.
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};
/*
 * Details of PHY Adapter (PA) layer UIC errors — index matches the PA
 * error bit position.
 */
static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};
/*
 * NOTE(review): the lines below are interior fragments of several functions
 * whose headers/footers are not visible in this chunk (reference-clock
 * request handshake, idle-state poll, link-state poll, crypto-engine boost,
 * MCQ IRQ enable).  Code is kept byte-identical; only comments were added.
 *
 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
 * to prevent host hang issue
 */
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
/* Wait for ack */
timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US); do {
time_checked = ktime_get();
value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
/* Wait until ack bit equals to req bit */ if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST)) goto out;
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
/*
 * If clock on timeout, assume clock is off, notify tfa do clock
 * off setting. (keep DIFN disable, release resource)
 * If clock off timeout, assume clock will off finally,
 * set ref_clk_enabled directly. (keep DIFN disable, keep resource)
 */ if (on)
ufs_mtk_ref_clk_notify(false, POST_CHANGE, res); else
host->ref_clk_enabled = false;
return -ETIMEDOUT;
out:
host->ref_clk_enabled = on; if (on)
ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
/*
 * NOTE(review): fragment boundary — below is the tail of an idle-state
 * wait helper (presumably ufs_mtk_wait_idle_state); confirm against the
 * full file.
 */
/* cannot use plain ktime_get() in suspend */
timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
/* wait a specific time after check base */
udelay(10);
wait_idle = false;
do {
time_checked = ktime_get_mono_fast_ns();
ufs_mtk_dbg_sel(hba);
val = ufshcd_readl(hba, REG_UFS_PROBE);
sm = val & 0x1f;
/*
 * if state is in H8 enter and H8 enter confirm
 * wait until return to idle state.
 */ if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
wait_idle = true;
udelay(50); continue;
} elseif (!wait_idle) break;
if (wait_idle && (sm == VS_HCE_BASE)) break;
} while (time_checked < timeout);
if (wait_idle && sm != VS_HCE_BASE)
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}
/*
 * NOTE(review): fragment boundary — link-state polling loop (likely
 * ufs_mtk_wait_link_state); returns 0 once probe bits [31:28] equal
 * the requested 'state'.
 */
timeout = ktime_add_ms(ktime_get(), max_wait_ms); do {
time_checked = ktime_get();
ufs_mtk_dbg_sel(hba);
val = ufshcd_readl(hba, REG_UFS_PROBE);
val = val >> 28;
if (val == state) return 0;
/* Sleep for max. 200us */
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
/*
 * NOTE(review): fragment boundary — crypto-clock boost body: enable the
 * crypt mux clock, then raise vcore and switch to the perf parent when
 * boosting, or do the reverse order when unboosting.
 */
cfg = host->crypt;
volt = cfg->vcore_volt;
reg = cfg->reg_vcore;
ret = clk_prepare_enable(cfg->clk_crypt_mux); if (ret) {
dev_info(hba->dev, "clk_prepare_enable(): %d\n",
ret); return;
}
if (boost) {
ret = regulator_set_voltage(reg, volt, INT_MAX); if (ret) {
dev_info(hba->dev, "failed to set vcore to %d\n", volt); goto out;
}
ret = clk_set_parent(cfg->clk_crypt_mux,
cfg->clk_crypt_perf); if (ret) {
dev_info(hba->dev, "failed to set clk_crypt_perf\n");
regulator_set_voltage(reg, 0, INT_MAX); goto out;
}
} else {
ret = clk_set_parent(cfg->clk_crypt_mux,
cfg->clk_crypt_lp); if (ret) {
dev_info(hba->dev, "failed to set clk_crypt_lp\n"); goto out;
}
ret = regulator_set_voltage(reg, 0, INT_MAX); if (ret) {
dev_info(hba->dev, "failed to set vcore to MIN\n");
}
}
out:
clk_disable_unprepare(cfg->clk_crypt_mux);
}
/* NOTE(review): fragment boundary — tail of an MCQ IRQ-enable helper. */
for (i = 0; i < host->mcq_nr_intr; i++) {
irq = host->mcq_intr_info[i].irq;
enable_irq(irq);
}
host->is_mcq_intr_enabled = true;
}
/**
 * ufs_mtk_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Return: 0 on success, non-zero on failure.
 *
 * NOTE(review): truncated in this chunk — the body below runs into an
 * unrelated fragment before its closing return; code kept byte-identical.
 */ staticint ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, enum ufs_notify_change_status status)
{ struct ufs_mtk_host *host = ufshcd_get_variant(hba); bool clk_pwr_off = false; int ret = 0;
/*
 * In case ufs_mtk_init() is not yet done, simply ignore.
 * This ufs_mtk_setup_clocks() shall be called from
 * ufs_mtk_init() after init is done.
 */ if (!host) return 0;
if (!on && status == PRE_CHANGE) { if (ufshcd_is_link_off(hba)) {
clk_pwr_off = true;
} elseif (ufshcd_is_link_hibern8(hba) ||
(!ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_auto_hibern8_enabled(hba))) { /*
 * Gate ref-clk and poweroff mphy if link state is in
 * OFF or Hibern8 by either Auto-Hibern8 or
 * ufshcd_link_state_transition().
 */
ret = ufs_mtk_wait_link_state(hba,
VS_LINK_HIBERN8,
15); if (!ret)
clk_pwr_off = true;
}
if (clk_pwr_off)
ufs_mtk_pwr_ctrl(hba, false);
ufs_mtk_mcq_disable_irq(hba);
} elseif (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
ufs_mtk_mcq_enable_irq(hba);
}
/*
 * NOTE(review): fragment boundary — tail of a legacy-IP detection helper
 * that returns bool 'is_legacy' based on the chip IP version.
 */
switch (hw_ip_ver) { case IP_LEGACY_VER_MT6893: case IP_LEGACY_VER_MT6781: /* can add other legacy chipset ID here accordingly */
is_legacy = true; break; default: break;
}
dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
return is_legacy;
}
/*
 * HW version format has been changed from 01MMmmmm to 1MMMmmmm, since
 * project MT6878. In order to perform correct version comparison,
 * version number is changed by SW for the following projects.
 * IP_VER_MT6983 0x00360000 to 0x10360000
 * IP_VER_MT6897 0x01440000 to 0x10440000
 * IP_VER_MT6989 0x01450000 to 0x10450000
 * IP_VER_MT6991 0x01460000 to 0x10460000
 *
 * NOTE(review): truncated — only the first two lines of this function's
 * body are visible in this chunk.
 */ staticvoid ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
{ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
u32 hw_ip_ver;
/*
 * ufs_mtk_get_controller_version - probe the UniPro version and derive the
 * host controller major version.
 * @hba: host controller instance
 *
 * Sets host->hw_ver.major to 2 as a safe default, then bumps it to 3 when
 * the device reports UniPro >= 1.8; also corrects hba->ufs_version to at
 * least UFSHCI 3.0 on platforms that advertise a wrong HCI version.
 * No-op if the version was already determined.
 */
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}
/*
 * NOTE(review): interior fragment of a clock-scaling init helper — the
 * function header is not visible in this chunk; code kept byte-identical.
 */
if (!ufs_mtk_is_clk_scale_ready(hba)) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
dev_info(hba->dev, "%s: Clk-scaling not ready. Feature disabled.",
__func__); return;
}
/*
 * Default get vcore if dts have these settings.
 * No matter clock scaling support or not. (may disable by customer)
 */
reg = devm_regulator_get_optional(dev, "dvfsrc-vcore"); if (IS_ERR(reg)) {
dev_info(dev, "failed to get dvfsrc-vcore: %ld",
PTR_ERR(reg)); return;
}
if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
&volt)) {
dev_info(dev, "failed to get clk-scale-up-vcore-min"); return;
}
host->mclk.reg_vcore = reg;
host->mclk.vcore_volt = volt;
/* If default boot is max gear, request vcore */ if (reg && volt && host->clk_scale_up) { if (regulator_set_voltage(reg, volt, INT_MAX)) {
dev_info(hba->dev, "Failed to set vcore to %d\n", volt);
}
}
}
/*
 * NOTE(review): fragment boundary — vendor-specific auto-hibern8 idle
 * timer selection, part of a larger setup function.
 */
if (ufshcd_is_auto_hibern8_supported(hba)) { switch (hba->dev_info.wmanufacturerid) { case UFS_VENDOR_SAMSUNG: /* configure auto-hibern8 timer to 3.5 ms */
us = 3500; break;
case UFS_VENDOR_MICRON: /* configure auto-hibern8 timer to 2 ms */
us = 2000; break;
default: /* configure auto-hibern8 timer to 1 ms */
us = 1000; break;
}
hba->ahit = ufs_mtk_us_to_ahit(us);
}
ufs_mtk_setup_clk_gating(hba);
}
/*
 * NOTE(review): truncated — only this function's declarations are visible;
 * its body runs into unrelated fragments below.
 */
staticvoid ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{ struct ufs_mtk_host *host = ufshcd_get_variant(hba); struct platform_device *pdev; int i; int irq;
/* NOTE(review): fragment boundary — power-mode negotiation fragment. */
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params); if (ret) {
pr_info("%s: failed to determine capabilities\n",
__func__);
}
if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
/* NOTE(review): fragment boundary — UniPro low-power-mode setter tail. */
ret = ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
lpm ? 1 : 0); if (!ret || !lpm) { /*
 * Forcibly set as non-LPM mode if UIC commands is failed
 * to use default hba_enable_delay_us value for re-enabling
 * the host.
 */
host->unipro_lpm = lpm;
}
return ret;
}
/*
 * NOTE(review): truncated — ufs_mtk_pre_link()'s final return/closing brace
 * is missing from this chunk; code kept byte-identical.
 */
staticint ufs_mtk_pre_link(struct ufs_hba *hba)
{ int ret;
u32 tmp;
ufs_mtk_get_controller_version(hba);
ret = ufs_mtk_unipro_set_lpm(hba, false); if (ret) return ret;
/*
 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
 * to make sure that both host and device TX LCC are disabled
 * once link startup is completed.
 */
ret = ufshcd_disable_host_tx_lcc(hba); if (ret) return ret;
/* disable deep stall */
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); if (ret) return ret;
tmp &= ~(1 << 6);
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
/*
 * NOTE(review): fragment boundary — link-startup notify dispatcher,
 * also truncated before its return.
 */
staticint ufs_mtk_link_startup_notify(struct ufs_hba *hba, enum ufs_notify_change_status stage)
{ int ret = 0;
switch (stage) { case PRE_CHANGE:
ret = ufs_mtk_pre_link(hba); break; case POST_CHANGE:
ufs_mtk_post_link(hba); break; default:
ret = -EINVAL; break;
}
/* NOTE(review): fragment boundary — tail of a device-reset helper. */
/* disable hba in middle of device reset */
ufshcd_hba_stop(hba);
/*
 * The reset signal is active low. UFS devices shall detect
 * more than or equal to 1us of positive or negative RST_n
 * pulse width.
 *
 * To be on safe side, keep the reset low for at least 10us.
 */
usleep_range(10, 15);
ufs_mtk_device_reset_ctrl(1, res);
/* Some devices may need time to respond to rst_n */
usleep_range(10000, 15000);
dev_info(hba->dev, "device reset done\n");
return 0;
}
/*
 * ufs_mtk_link_set_hpm - bring the link back to high-power (active) mode.
 * @hba: host controller instance
 *
 * Re-enables the host, leaves UniPro low-power mode, exits Hibern8 and
 * verifies the link state, then makes the HBA (and MCQ queues, when
 * enabled) operational again.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (err)
		return err;

	/* Check link state to make sure exit h8 success */
	ufs_mtk_wait_idle_state(hba, 5);
	err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (err) {
		dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
		return err;
	}
	ufshcd_set_link_active(hba);

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	if (hba->mcq_enabled) {
		ufs_mtk_config_mcq(hba, false);
		ufshcd_mcq_make_queues_operational(hba);
		ufshcd_mcq_config_mac(hba, hba->nutrs);
		ufshcd_mcq_enable(hba);
	}

	return 0;
}
/*
 * NOTE(review): truncated — only the signature of ufs_mtk_link_set_lpm()
 * is visible; the lines after it belong to a different (void) regulator
 * low-power helper that takes an 'lpm' flag.  Code kept byte-identical.
 */
staticint ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{ int err;
/* Prevent entering LPM when device is still active */ if (lpm && ufshcd_is_ufs_dev_active(hba)) return;
/* Skip vccqx lpm control and control vsx only */ if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
skip_vccqx = true;
/* VCC is always-on, control vsx only */ if (!hba->vreg_info.vcc)
skip_vccqx = true;
/* Broken vcc keep vcc always on, most case control vsx only */ if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) { /* Some device vccqx/vsx can enter lpm */ if (ufs_mtk_is_allow_vccqx_lpm(hba))
skip_vccqx = false; else/* control vsx only */
skip_vccqx = true;
}
if (lpm) { if (!skip_vccqx)
ufs_mtk_vccqx_set_lpm(hba, lpm);
ufs_mtk_vsx_set_lpm(hba, lpm);
} else {
ufs_mtk_vsx_set_lpm(hba, lpm); if (!skip_vccqx)
ufs_mtk_vccqx_set_lpm(hba, lpm);
}
}
/*
 * NOTE(review): fragment boundary — suspend-path fragment (likely the body
 * of a suspend callback); its function header is not visible here.
 */
if (status == PRE_CHANGE) { if (ufshcd_is_auto_hibern8_supported(hba)) return ufs_mtk_auto_hibern8_disable(hba); return 0;
}
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba); if (err) goto fail;
}
if (!ufshcd_is_link_active(hba)) { /*
 * Make sure no error will be returned to prevent
 * ufshcd_suspend() re-enabling regulators while vreg is still
 * in low-power mode.
 */
err = ufs_mtk_mphy_power_on(hba, false); if (err) goto fail;
}
if (ufshcd_is_link_off(hba))
ufs_mtk_device_reset_ctrl(0, res);
ufs_mtk_sram_pwr_ctrl(false, res);
return 0;
fail: /*
 * Set link as off state enforcedly to trigger
 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
 * for completed host reset.
 */
ufshcd_set_link_off(hba); return -EAGAIN;
}
/*
 * NOTE(review): fragment boundary — resume-path fragment; never propagates
 * a failure (see comment on the final return below).
 */
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
ufs_mtk_dev_vreg_set_lpm(hba, false);
ufs_mtk_sram_pwr_ctrl(true, res);
err = ufs_mtk_mphy_power_on(hba, true); if (err) goto fail;
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba); if (err) goto fail;
}
return 0;
fail: /*
 * Check if the platform (parent) device has resumed, and ensure that
 * power, clock, and MTCMOS are all turned on.
 */
err = ufshcd_link_recovery(hba); if (err) {
dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
hba->dev->power.request,
hba->dev->power.runtime_status,
hba->dev->power.runtime_error);
}
return 0; /* Cannot return a failure, otherwise, the I/O will hang. */
}
/*
 * NOTE(review): fragment boundary — device-quirk fixup fragment keyed on
 * the device manufacturer id ('mid').
 */
if (hba->mcq_enabled) { /* Iterate all cpus to set affinity for mcq irqs */ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
ufs_mtk_mcq_set_irq_affinity(hba, cpu);
}
if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
} elseif (mid == UFS_VENDOR_MICRON) { /* Only for the host which have TX skew issue */ if (ufs_mtk_is_tx_skew_fix(hba) &&
(STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) ||
STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) ||
STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) ||
STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) ||
STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) ||
STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8);
}
}
/*
 * Decide waiting time before gating reference clock and
 * after ungating reference clock according to vendors'
 * requirements.
 */ if (mid == UFS_VENDOR_SAMSUNG)
ufs_mtk_setup_ref_clk_wait_us(hba, 1); elseif (mid == UFS_VENDOR_SKHYNIX)
ufs_mtk_setup_ref_clk_wait_us(hba, 30); elseif (mid == UFS_VENDOR_TOSHIBA)
ufs_mtk_setup_ref_clk_wait_us(hba, 100); else
ufs_mtk_setup_ref_clk_wait_us(hba,
REFCLK_DEFAULT_WAIT_US); return 0;
}
/*
 * NOTE(review): interior fragment of _ufs_mtk_clk_scale() — the function
 * header and local declarations are not visible in this chunk.  Switches
 * the ufs_sel (and optional FDE) clock parents between max/min sources,
 * raising vcore before scaling up and lowering it after scaling down.
 * Code kept byte-identical.
 */
reg = host->mclk.reg_vcore;
volt = host->mclk.vcore_volt; if (reg && volt != 0)
clk_bind_vcore = true;
if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
clk_fde_scale = true;
ret = clk_prepare_enable(clki->clk); if (ret) {
dev_info(hba->dev, "clk_prepare_enable() fail, ret: %d\n", ret); return;
}
if (clk_fde_scale) {
ret = clk_prepare_enable(fde_clki->clk); if (ret) {
dev_info(hba->dev, "fde clk_prepare_enable() fail, ret: %d\n", ret); return;
}
}
if (scale_up) { if (clk_bind_vcore) {
ret = regulator_set_voltage(reg, volt, INT_MAX); if (ret) {
dev_info(hba->dev, "Failed to set vcore to %d\n", volt); goto out;
}
}
ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk); if (ret) {
dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
ret);
}
if (clk_fde_scale) {
ret = clk_set_parent(fde_clki->clk,
mclk->ufs_fde_max_clki->clk); if (ret) {
dev_info(hba->dev, "Failed to set fde clk mux, ret = %d\n",
ret);
}
}
} else { if (clk_fde_scale) {
ret = clk_set_parent(fde_clki->clk,
mclk->ufs_fde_min_clki->clk); if (ret) {
dev_info(hba->dev, "Failed to set fde clk mux, ret = %d\n",
ret); goto out;
}
}
ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk); if (ret) {
dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
ret); goto out;
}
if (clk_bind_vcore) {
ret = regulator_set_voltage(reg, 0, INT_MAX); if (ret) {
dev_info(hba->dev, "failed to set vcore to MIN\n");
}
}
}
out:
clk_disable_unprepare(clki->clk);
if (clk_fde_scale)
clk_disable_unprepare(fde_clki->clk);
}
/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * MTK platform supports clk scaling by switching parent of ufs_sel(mux).
 * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
 * Max and min clocks rate of ufs_sel defined in dts should match rate of
 * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
 * This prevent changing rate of pll clock that is shared between modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki = mclk->ufs_sel_clki;

	/* Nothing to do if we are already at the requested scale */
	if (host->clk_scale_up == scale_up)
		goto out;

	_ufs_mtk_clk_scale(hba, scale_up);
	host->clk_scale_up = scale_up;

	/* Must always set before clk_set_rate() */
	if (scale_up)
		clki->curr_freq = clki->max_freq;
	else
		clki->curr_freq = clki->min_freq;
out:
	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}
/*
 * NOTE(review): interior fragment — MCQ init guard from a function whose
 * header is not visible in this chunk; code kept byte-identical.
 */
/* fail mcq initialization if interrupt is not filled properly */ if (!host->mcq_nr_intr) {
dev_info(hba->dev, "IRQs not ready. MCQ disabled."); return -EINVAL;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.