/* * Currently this driver only supports AES-256-XTS. All known versions * of ICE support it, but to be safe make sure it is really declared in * the crypto capability registers. The crypto capability registers * also give the supported data unit size(s).
*/ for (i = 0; i < caps.num_crypto_cap; i++) {
cap.reg_val = cpu_to_le32(ufshcd_readl(hba,
REG_UFS_CRYPTOCAP +
i * sizeof(__le32))); if (cap.algorithm_id == UFS_CRYPTO_ALG_AES_XTS &&
cap.key_size == UFS_CRYPTO_KEY_SIZE_256)
profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
cap.sdus_mask * 512;
}
do {
err = ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
&tx_fsm_val); if (err || tx_fsm_val == TX_FSM_HIBERN8) break;
/* sleep for max. 200us */
usleep_range(100, 200);
} while (time_before(jiffies, timeout));
/* * we might have scheduled out for long during polling so * check the state again.
*/ if (time_after(jiffies, timeout))
err = ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
&tx_fsm_val);
ret = reset_control_assert(host->core_reset); if (ret) {
dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
__func__, ret); return ret;
}
/* * The hardware requirement for delay between assert/deassert * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to * ~125us (4/32768). To be on the safe side add 200us delay.
*/
usleep_range(200, 210);
ret = reset_control_deassert(host->core_reset); if (ret) {
dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
__func__, ret); return ret;
}
/* * HW ver 5 can only support up to HS-G5 Rate-A due to HW limitations. * If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A, * so that the subsequent power mode change shall stick to Rate-A.
*/ if (host->hw_ver.major == 0x5) { if (host->phy_gear == UFS_HS_G5)
host_params->hs_rate = PA_HS_MODE_A; else
host_params->hs_rate = PA_HS_MODE_B;
}
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_host_reset(hba); if (ret) return ret;
if (phy->power_count)
phy_power_off(phy);
/* phy initialization - calibrate the phy */
ret = phy_init(phy); if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret); return ret;
}
ret = phy_set_mode_ext(phy, mode, host->phy_gear); if (ret) goto out_disable_phy;
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy); if (ret) {
dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
__func__, ret); goto out_disable_phy;
}
ret = phy_calibrate(phy); if (ret) {
dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret); goto out_disable_phy;
}
ufs_qcom_select_unipro_mode(host);
return 0;
out_disable_phy:
phy_exit(phy);
return ret;
}
/* * The UTP controller has a number of internal clock gating cells (CGCs). * Internal hardware sub-modules within the UTP controller control the CGCs. * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved * in a specific operation, UTP controller CGCs are by default disabled and * this function enables them (after every UFS link startup) to save some power * leakage.
*/ staticvoid ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{ int err;
switch (status) { case PRE_CHANGE:
err = ufs_qcom_power_up_sequence(hba); if (err) return err;
/* * The PHY PLL output is the source of tx/rx lane symbol * clocks, hence, enable the lane clocks only after PHY * is initialized.
*/
err = ufs_qcom_enable_lane_clks(host); break; case POST_CHANGE: /* check if UFS PHY moved from DISABLED to HIBERN8 */
err = ufs_qcom_check_hibern8(hba);
ufs_qcom_enable_hw_clk_gating(hba);
ufs_qcom_ice_enable(host);
ufs_qcom_config_ice_allocator(host); break; default:
dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
err = -EINVAL; break;
} return err;
}
/** * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers * * @hba: host controller instance * @is_pre_scale_up: flag to check if pre scale up condition. * @freq: target opp freq * Return: zero for success and non-zero in case of a failure.
*/ staticint ufs_qcom_cfg_timers(struct ufs_hba *hba, bool is_pre_scale_up, unsignedlong freq)
{ struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_clk_info *clki; unsignedlong clk_freq = 0;
u32 core_clk_cycles_per_us;
/* * UTP controller uses SYS1CLK_1US_REG register for Interrupt * Aggregation logic. * It is mandatory to write SYS1CLK_1US_REG register on UFS host * controller V4.0.0 onwards.
*/ if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba)) return 0;
if (hba->use_pm_opp && freq != ULONG_MAX) {
clk_freq = ufs_qcom_opp_freq_to_clk_freq(hba, freq, "core_clk"); if (clk_freq) goto cfg_timers;
}
list_for_each_entry(clki, &hba->clk_list_head, list) { if (!strcmp(clki->name, "core_clk")) { if (freq == ULONG_MAX) {
clk_freq = clki->max_freq; break;
}
switch (status) { case PRE_CHANGE: if (ufs_qcom_cfg_timers(hba, false, ULONG_MAX)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__); return -EINVAL;
}
err = ufs_qcom_set_core_clk_ctrl(hba, true, ULONG_MAX); if (err)
dev_err(hba->dev, "cfg core clk ctrl failed\n"); /* * Some UFS devices (and may be host) have issues if LCC is * enabled. So we are setting PA_Local_TX_LCC_Enable to 0 * before link startup which will make sure that both host * and device TX LCC are disabled once link startup is * completed.
*/
err = ufshcd_disable_host_tx_lcc(hba);
err = ufs_qcom_enable_lane_clks(host); if (err) return err;
if ((!ufs_qcom_is_link_active(hba)) &&
host->hw_ver.major == 5 &&
host->hw_ver.minor == 0 &&
host->hw_ver.step == 0) {
ufshcd_writel(hba, UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW, UFS_MEM_ICE_CFG);
reg_val = ufshcd_readl(hba, UFS_MEM_ICE_CFG);
reg_val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW); /* * HW documentation doesn't recommend any delay between the * reset set and clear. But we are enforcing an arbitrary delay * to give flops enough time to settle in.
*/
usleep_range(50, 100);
ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG);
ufshcd_readl(hba, UFS_MEM_ICE_CFG);
}
if (enable)
temp |= host->dev_ref_clk_en_mask; else
temp &= ~host->dev_ref_clk_en_mask;
/* * If we are here to disable this clock it might be immediately * after entering into hibern8 in which case we need to make * sure that device ref_clk is active for specific time after * hibern8 enter.
*/ if (!enable) { unsignedlong gating_wait;
gating_wait = host->hba->dev_info.clk_gating_wait_us; if (!gating_wait) {
udelay(1);
} else { /* * bRefClkGatingWaitTime defines the minimum * time for which the reference clock is * required by device during transition from * HS-MODE to LS-MODE or HIBERN8 state. Give it * more delay to be on the safe side.
*/
gating_wait += 10;
usleep_range(gating_wait, gating_wait + 10);
}
}
/* * Make sure the write to ref_clk reaches the destination and * not stored in a Write Buffer (WB).
*/
readl(host->dev_ref_clk_ctrl_mmio);
/* * If we call hibern8 exit after this, we need to make sure that * device ref_clk is stable for at least 1us before the hibern8 * exit command.
*/ if (enable)
udelay(1);
ret = icc_set_bw(host->icc_ddr, 0, mem_bw); if (ret < 0) {
dev_err(dev, "failed to set bandwidth request: %d\n", ret); return ret;
}
ret = icc_set_bw(host->icc_cpu, 0, cfg_bw); if (ret < 0) {
dev_err(dev, "failed to set bandwidth request: %d\n", ret); return ret;
}
return 0;
}
staticstruct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host)
{ struct ufs_pa_layer_attr *p = &host->dev_req_params; int gear = max_t(u32, p->gear_rx, p->gear_tx); int lane = max_t(u32, p->lane_rx, p->lane_tx);
if (WARN_ONCE(gear > QCOM_UFS_MAX_GEAR, "ICC scaling for UFS Gear (%d) not supported. Using Gear (%d) bandwidth\n",
gear, QCOM_UFS_MAX_GEAR))
gear = QCOM_UFS_MAX_GEAR;
if (WARN_ONCE(lane > QCOM_UFS_MAX_LANE, "ICC scaling for UFS Lane (%d) not supported. Using Lane (%d) bandwidth\n",
lane, QCOM_UFS_MAX_LANE))
lane = QCOM_UFS_MAX_LANE;
if (ufshcd_is_hs_mode(p)) { if (p->hs_rate == PA_HS_MODE_B) return ufs_qcom_bw_table[MODE_HS_RB][gear][lane]; else return ufs_qcom_bw_table[MODE_HS_RA][gear][lane];
} else { return ufs_qcom_bw_table[MODE_PWM][gear][lane];
}
}
/* Determine the equalizer value based on the gear */
equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS;
for (i = 0; i < tx_lanes; i++) {
ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i),
equalizer_val); if (ret)
dev_err(hba->dev, "%s: failed equalizer lane %d\n",
__func__, i);
}
}
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__); return -EINVAL;
}
switch (status) { case PRE_CHANGE:
ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params); if (ret) {
dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__); return ret;
}
/* * During UFS driver probe, always update the PHY gear to match the negotiated * gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled, * the second init can program the optimal PHY settings. This allows one to start * the first init with either the minimum or the maximum support gear.
*/ if (hba->ufshcd_state == UFSHCD_STATE_RESET) { /* * Skip REINIT if the negotiated gear matches with the * initial phy_gear. Otherwise, update the phy_gear to * program the optimal gear setting during REINIT.
*/ if (host->phy_gear == dev_req_params->gear_tx)
hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH; else
host->phy_gear = dev_req_params->gear_tx;
}
/* enable the device ref clock before changing to HS mode */ if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
ufs_qcom_dev_ref_clk_ctrl(host, true);
if (host->hw_ver.major >= 0x4) {
ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING)
ufs_qcom_set_tx_hs_equalizer(hba,
dev_req_params->gear_tx, dev_req_params->lane_tx);
break; case POST_CHANGE: /* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
ufs_qcom_icc_update_bw(host);
/* disable the device ref clock if entered PWM mode */ if (ufshcd_is_hs_mode(&hba->pwr_info) &&
!ufshcd_is_hs_mode(dev_req_params))
ufs_qcom_dev_ref_clk_ctrl(host, false); break; default:
ret = -EINVAL; break;
}
return ret;
}
/*
 * ufs_qcom_quirk_host_pa_saveconfigtime - apply the PA_SaveConfigTime quirk.
 * @hba: host controller instance
 *
 * Reads the vendor-specific PA_VS_CONFIG_REG1 attribute and sets bit 12 in
 * it, which allows extension of the MSB bits of the PA_SaveConfigTime
 * attribute.
 *
 * Return: 0 on success, error code from the DME get/set otherwise.
 */
static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		return err;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			      (pa_vs_config_reg1 | (1 << 12)));
}
/*
 * ufs_qcom_override_pa_tx_hsg1_sync_len - override the peer's HS-G1 TX sync
 * length.
 * @hba: host controller instance
 *
 * Sets the device-side (peer) PA_TX_HSG1_SYNC_LENGTH attribute to
 * PA_TX_HSG1_SYNC_LENGTH_VAL. A failure is logged but deliberately not
 * propagated — this is a best-effort tuning quirk.
 */
static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH),
				  PA_TX_HSG1_SYNC_LENGTH_VAL);
	if (err)
		dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
}
staticint ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{ int err = 0;
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH)
ufs_qcom_override_pa_tx_hsg1_sync_len(hba);
/** * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks * @hba: host controller instance * * QCOM UFS host controller might have some non standard behaviours (quirks) * than what is specified by UFSHCI specification. Advertise all such * quirks to standard UFS host controller driver so standard takes them into * account.
*/ staticvoid ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{ conststruct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev); struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (host->hw_ver.major == 0x2)
hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
if (host->hw_ver.major > 0x3)
hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
if (drvdata && drvdata->quirks)
hba->quirks |= drvdata->quirks;
}
/* * Default to powering up the PHY to the max gear possible, which is * backwards compatible with lower gears but not optimal from * a power usage point of view. After device negotiation, if the * gear is lower a reinit will be performed to program the PHY * to the ideal gear for this combo of controller and device.
*/
host->phy_gear = host_params->hs_tx_gear;
if (host->hw_ver.major < 0x4) { /* * These controllers only have one PHY init sequence, * let's power up the PHY using that (the minimum supported * gear, UFS_HS_G2).
*/
host->phy_gear = UFS_HS_G2;
} elseif (host->hw_ver.major >= 0x5) {
val = ufshcd_readl(host->hba, REG_UFS_DEBUG_SPARE_CFG);
dev_major = FIELD_GET(UFS_DEV_VER_MAJOR_MASK, val);
/* * Since the UFS device version is populated, let's remove the * REINIT quirk as the negotiated gear won't change during boot. * So there is no need to do reinit.
*/ if (dev_major != 0x0)
host->hba->quirks &= ~UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
/* * For UFS 3.1 device and older, power up the PHY using HS-G4 * PHY gear to save power.
*/ if (dev_major > 0x0 && dev_major < 0x4)
host->phy_gear = UFS_HS_G4;
}
}
/** * ufs_qcom_setup_clocks - enables/disable clocks * @hba: host controller instance * @on: If true, enable clocks else disable them. * @status: PRE_CHANGE or POST_CHANGE notify * * There are certain clocks which comes from the PHY so it needs * to be managed together along with controller clocks which also * provides a better power saving. Hence keep phy_power_off/on calls * in ufs_qcom_setup_clocks, so that PHY's regulators & clks can be * turned on/off along with UFS's clocks. * * Return: 0 on success, non-zero on failure.
*/ staticint ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, enum ufs_notify_change_status status)
{ struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct phy *phy; int err;
/* * In case ufs_qcom_init() is not yet done, simply ignore. * This ufs_qcom_setup_clocks() shall be called from * ufs_qcom_init() after init is done.
*/ if (!host) return 0;
phy = host->generic_phy;
switch (status) { case PRE_CHANGE: if (on) {
ufs_qcom_icc_update_bw(host); if (ufs_qcom_is_link_hibern8(hba)) {
err = ufs_qcom_enable_lane_clks(host); if (err) {
dev_err(hba->dev, "enable lane clks failed, ret=%d\n", err); return err;
}
}
} else { if (!ufs_qcom_is_link_active(hba)) { /* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
err = phy_power_off(phy); if (err) {
dev_err(hba->dev, "phy power off failed, ret=%d\n", err); return err;
}
} break; case POST_CHANGE: if (on) {
err = phy_power_on(phy); if (err) {
dev_err(hba->dev, "phy power on failed, ret = %d\n", err); return err;
}
/* enable the device ref clock for HS mode*/ if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
} else { if (ufs_qcom_is_link_hibern8(hba))
ufs_qcom_disable_lane_clks(host);
/* * after reset deassertion, phy will need all ref clocks, * voltage, current to settle down before starting serdes.
*/
usleep_range(1000, 1100); return 0;
}
host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr"); if (IS_ERR(host->icc_ddr)) return dev_err_probe(dev, PTR_ERR(host->icc_ddr), "failed to acquire interconnect path\n");
host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs"); if (IS_ERR(host->icc_cpu)) return dev_err_probe(dev, PTR_ERR(host->icc_cpu), "failed to acquire interconnect path\n");
/* * Set Maximum bandwidth vote before initializing the UFS controller and * device. Ideally, a minimal interconnect vote would suffice for the * initialization, but a max vote would allow faster initialization.
*/
ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw,
ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw); if (ret < 0) return dev_err_probe(dev, ret, "failed to set bandwidth request\n");
return 0;
}
/** * ufs_qcom_init - bind phy with controller * @hba: host controller instance * * Binds PHY with controller and powers up PHY enabling clocks * and regulators. * * Return: -EPROBE_DEFER if binding fails, returns negative error * on phy power up failure and returns zero on success.
*/ staticint ufs_qcom_init(struct ufs_hba *hba)
{ int err; struct device *dev = hba->dev; struct ufs_qcom_host *host; struct ufs_clk_info *clki; conststruct ufs_qcom_drvdata *drvdata = of_device_get_match_data(hba->dev);
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); if (!host) return -ENOMEM;
/* Make a two way bind between the qcom host and the hba */
host->hba = hba;
ufshcd_set_variant(hba, host);
/* Setup the optional reset control of HCI */
host->core_reset = devm_reset_control_get_optional(hba->dev, "rst"); if (IS_ERR(host->core_reset)) {
err = dev_err_probe(dev, PTR_ERR(host->core_reset), "Failed to get reset control\n"); goto out_variant_clear;
}
/* Fire up the reset controller. Failure here is non-fatal. */
host->rcdev.of_node = dev->of_node;
host->rcdev.ops = &ufs_qcom_reset_ops;
host->rcdev.owner = dev->driver->owner;
host->rcdev.nr_resets = 1;
err = devm_reset_controller_register(dev, &host->rcdev); if (err)
dev_warn(dev, "Failed to register reset controller\n");
if (!has_acpi_companion(dev)) {
host->generic_phy = devm_phy_get(dev, "ufsphy"); if (IS_ERR(host->generic_phy)) {
err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n"); goto out_variant_clear;
}
}
err = ufs_qcom_icc_init(host); if (err) goto out_variant_clear;
/** * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles * * @hba: host controller instance * @cycles_in_1us: No of cycles in 1us to be configured * * Returns error if dme get/set configuration for 40ns fails * and returns zero on success.
*/ staticint ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
u32 cycles_in_1us)
{ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
u32 cycles_in_40ns;
u32 reg; int err;
/* * UFS host controller V4.0.0 onwards needs to program * PA_VS_CORE_CLK_40NS_CYCLES attribute per programmed * frequency of unipro core clk of UFS host controller.
*/ if (host->hw_ver.major < 4) return 0;
/* * Generic formulae for cycles_in_40ns = (freq_unipro/25) is not * applicable for all frequencies. For ex: ceil(37.5 MHz/25) will * be 2 and ceil(403 MHZ/25) will be 17 whereas Hardware * specification expect to be 16. Hence use exact hardware spec * mandated value for cycles_in_40ns instead of calculating using * generic formulae.
*/ switch (cycles_in_1us) { case UNIPRO_CORE_CLK_FREQ_403_MHZ:
cycles_in_40ns = 16; break; case UNIPRO_CORE_CLK_FREQ_300_MHZ:
cycles_in_40ns = 12; break; case UNIPRO_CORE_CLK_FREQ_201_5_MHZ:
cycles_in_40ns = 8; break; case UNIPRO_CORE_CLK_FREQ_150_MHZ:
cycles_in_40ns = 6; break; case UNIPRO_CORE_CLK_FREQ_100_MHZ:
cycles_in_40ns = 4; break; case UNIPRO_CORE_CLK_FREQ_75_MHZ:
cycles_in_40ns = 3; break; case UNIPRO_CORE_CLK_FREQ_37_5_MHZ:
cycles_in_40ns = 2; break; default:
dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n",
cycles_in_1us); return -EINVAL;
}
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), ®); if (err) return err;
/* ensure below dumps occur only in task context due to blocking calls. */ if (in_task()) { /* Dump MCQ Host Vendor Specific Registers */ if (hba->mcq_enabled)
ufs_qcom_dump_mcq_hci_regs(hba);
/* voluntarily yield the CPU as we are dumping too much data */
ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
cond_resched();
ufs_qcom_dump_testbus(hba);
}
}
/** * ufs_qcom_device_reset() - toggle the (optional) device reset line * @hba: per-adapter instance * * Toggles the (optional) reset line to reset the attached device.
*/ staticint ufs_qcom_device_reset(struct ufs_hba *hba)
{ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
/* reset gpio is optional */ if (!host->device_reset) return -EOPNOTSUPP;
/* * The UFS device shall detect reset pulses of 1us, sleep for 10us to * be on the safe side.
*/
ufs_qcom_device_reset_ctrl(hba, true);
usleep_range(10, 15);
/* * 1. We only handle CQs as of now. * 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg); if (ret) {
dev_warn(hba->dev, "Platform MSI not supported or failed, continuing without ESI\n"); return ret; /* Continue without ESI */
}
opp = dev_pm_opp_find_freq_exact_indexed(hba->dev, freq, 0, true); if (IS_ERR(opp)) {
dev_err(hba->dev, "Failed to find OPP for exact frequency %lu\n", freq); return 0;
}
list_for_each_entry(clki, &hba->clk_list_head, list) { if (!strcmp(clki->name, name)) {
found = true; break;
}
idx++;
}
if (!found) {
dev_err(hba->dev, "Failed to find clock '%s' in clk list\n", name);
dev_pm_opp_put(opp); return 0;
}
/** * ufs_qcom_probe - probe routine of the driver * @pdev: pointer to Platform device handle * * Return: zero for success and non-zero for failure.
*/ staticint ufs_qcom_probe(struct platform_device *pdev)
{ int err; struct device *dev = &pdev->dev;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.