/*
 * Writable (0644) module parameters. The uic_cmd_timeout_ops /
 * dev_cmd_timeout_ops callbacks (defined elsewhere in this file) are
 * presumably what enforce the value ranges described below — TODO confirm.
 */
module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
MODULE_PARM_DESC(uic_cmd_timeout, "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
MODULE_PARM_DESC(dev_cmd_timeout, "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
conststruct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE}, /* * For DeepSleep, the link is first put in hibern8 and then off. * Leaving the link in hibern8 is not supported.
*/
[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
/*
 * NOTE(review): fragment of a command-tracing function (presumably
 * ufshcd_add_command_trace) — the function signature and surrounding lines
 * were lost in extraction, so the code is kept verbatim. It traces the UPIU,
 * then for READ(10)/WRITE(10) extracts transfer length, LBA and (for writes)
 * the group number from CDB byte 6; for UNMAP it records the byte count.
 */
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str_t); if (!trace_ufshcd_command_enabled()) return;
opcode = cmd->cmnd[0];
if (opcode == READ_10 || opcode == WRITE_10) { /* * Currently we only fully trace read(10) and write(10) commands
*/
transfer_len =
be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
lba = scsi_get_lba(cmd); if (opcode == WRITE_10)
group_id = lrbp->cmd->cmnd[6];
} elseif (opcode == UNMAP) { /* * The number of Bytes to be unmapped beginning with the lba.
*/
transfer_len = blk_rq_bytes(rq);
lba = scsi_get_lba(cmd);
}
/*
 * NOTE(review): two-line fragment — reads the interrupt status register and
 * begins an MCQ (multi-circular-queue) branch; the enclosing function was
 * lost in extraction. Kept verbatim; identify and restore from upstream.
 */
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
if (hba->mcq_enabled) { struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	/* Index corresponds to the pwr_rx/pwr_tx mode values. */
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	/*
	 * Using dev_dbg to avoid messages during runtime PM to avoid
	 * never-ending cycles of messages written back to storage by user space
	 * causing runtime resume, causing more messages and so on.
	 */
	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		__func__,
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
/*
 * ufshcd_device_reset - reset the UFS device via the vendor hook
 * @hba: per adapter instance
 *
 * On a successful reset, marks the device active and invalidates cached
 * WriteBooster state and (for relative RTC) the RTC baseline. Records the
 * event in the error history unless the vop is unimplemented (-EOPNOTSUPP).
 */
static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			/* Device-side WB state was lost across the reset */
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
		if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
			hba->dev_info.rtc_time_baseline = 0;
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}
/**
 * ufshcd_delay_us - busy-wait or sleep for the given number of microseconds
 * @us: delay in microseconds; 0 is a no-op
 * @tolerance: extra slack (in microseconds) allowed when sleeping
 *
 * Short delays (< 10 us) busy-wait with udelay(); longer ones sleep with
 * usleep_range() so the scheduler can coalesce wakeups.
 */
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return: -ETIMEDOUT on error, zero on success.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long interval_us,
				    unsigned long timeout_ms)
{
	u32 v;

	val &= mask; /* ignore bits that we don't intend to wait on */

	/*
	 * NOTE(review): the function tail was truncated in the extracted
	 * source; restored per mainline, which polls @reg every @interval_us
	 * until (value & mask) == val or @timeout_ms expires.
	 */
	return read_poll_timeout(ufshcd_readl, v, (v & mask) == val,
				 interval_us, timeout_ms * 1000, false, hba, reg);
}
/** * ufshcd_get_intr_mask - Get the interrupt bit mask * @hba: Pointer to adapter instance * * Return: interrupt bit mask per version
*/ staticinline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{ if (hba->ufs_version <= ufshci_version(2, 0)) return INTERRUPT_MASK_ALL_VER_11;
return INTERRUPT_MASK_ALL_VER_21;
}
/** * ufshcd_get_ufs_version - Get the UFS version supported by the HBA * @hba: Pointer to adapter instance * * Return: UFSHCI version supported by the controller
*/ staticinline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
u32 ufshci_ver;
/* * UFSHCI v1.x uses a different version scheme, in order * to allow the use of comparisons with the ufshci_version * function, we convert it to the same scheme as ufs 2.0+.
*/ if (ufshci_ver & 0x00010000) return ufshci_version(1, ufshci_ver & 0x00000100);
return ufshci_ver;
}
/** * ufshcd_is_device_present - Check if any device connected to * the host controller * @hba: pointer to adapter instance * * Return: true if device present, false if no device detected
*/ staticinlinebool ufshcd_is_device_present(struct ufs_hba *hba)
{ return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}
/** * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status * @lrbp: pointer to local command reference block * @cqe: pointer to the completion queue entry * * This function is used to get the OCS field from UTRD * * Return: the OCS field in the UTRD.
*/ staticenum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp, struct cq_entry *cqe)
{ if (cqe) return le32_to_cpu(cqe->status) & MASK_OCS;
/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		mask = ~mask;
	/*
	 * From the UFSHCI specification: "UTP Transfer Request List CLear
	 * Register (UTRLCLR): This field is bit significant. Each bit
	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
	 * corresponds to request slot 0. A bit in this field is set to '0'
	 * by host software to indicate to the host controller that a transfer
	 * request slot is cleared. The host controller
	 * shall free up any resources associated to the request slot
	 * immediately, and shall set the associated bit in UTRLDBR to '0'. The
	 * host software indicates no change to request slots by setting the
	 * associated bits in this field to '1'. Bits in this field shall only
	 * be set '1' or '0' by host software when UTRLRSR is set to '1'."
	 */
	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	/* The quirk inverts the register's clear polarity. */
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Return: 0 on success; a positive value if failed.
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}
/** * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command * @hba: Pointer to adapter instance * * This function gets UIC command argument3 * * Return: 0 on success; non-zero value on error.
*/ staticinline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{ return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/** * ufshcd_is_exception_event - Check if the device raised an exception event * @ucd_rsp_ptr: pointer to response UPIU * * The function checks if the device raised an exception event indicated in * the Device Information field of response UPIU. * * Return: true if exception is raised, false otherwise.
*/ staticinlinebool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{ return ucd_rsp_ptr->header.device_information & 1;
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates the
 *			host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/*
 * NOTE(review): tail fragment of a controller-enable function (presumably
 * ufshcd_hba_start) — its signature and the computation of `val` were lost in
 * extraction. Kept verbatim: sets CRYPTO_GENERAL_ENABLE when inline crypto is
 * enabled, then writes the controller-enable register.
 */
if (ufshcd_crypto_enable(hba))
val |= CRYPTO_GENERAL_ENABLE;
ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Return: true if and only if the controller is active.
 */
bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
/**
 * ufshcd_pm_qos_update - update PM QoS request
 * @hba: per adapter instance
 * @on: If True, vote for perf PM QoS mode otherwise power save mode
 */
static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
	guard(mutex)(&hba->pm_qos_mutex);

	if (!hba->pm_qos_enabled)
		return;

	cpu_latency_qos_update_request(&hba->pm_qos_req,
				       on ? 0 : PM_QOS_DEFAULT_VALUE);
}
/*
 * NOTE(review): this definition is internally garbled — the scale-up loop is
 * truncated mid-body and the remainder (from dev_pm_opp_find_freq_floor_indexed
 * onward) appears to belong to a separate OPP-rate helper (presumably
 * ufshcd_opp_set_rate). Code kept byte-identical; restore both functions from
 * upstream before building.
 */
/** * ufshcd_set_clk_freq - set UFS controller clock frequencies * @hba: per adapter instance * @scale_up: If True, set max possible frequency othewise set low frequency * * Return: 0 if successful; < 0 upon failure.
*/ staticint ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{ int ret = 0; struct ufs_clk_info *clki; struct list_head *head = &hba->clk_list_head;
if (list_empty(head)) goto out;
list_for_each_entry(clki, head, list) { if (!IS_ERR_OR_NULL(clki->clk)) { if (scale_up && clki->max_freq) { if (clki->curr_freq == clki->max_freq) continue;
ret = clk_set_rate(clki->clk, clki->max_freq); if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
__func__, clki->name,
clki->max_freq, ret); break;
}
trace_ufshcd_clk_scaling(hba, "scaled up", clki->name,
clki->curr_freq,
clki->max_freq);
opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
&freq, 0); if (IS_ERR(opp)) return PTR_ERR(opp);
ret = dev_pm_opp_set_opp(hba->dev, opp);
dev_pm_opp_put(opp);
return ret;
}
/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
			     bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, PRE_CHANGE);
	if (ret)
		goto out;

	if (hba->use_pm_opp)
		ret = ufshcd_opp_set_rate(hba, freq);
	else
		ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, freq, POST_CHANGE);
	if (ret) {
		/* Vendor vetoed the change: roll the clocks back. */
		if (hba->use_pm_opp)
			ufshcd_opp_set_rate(hba,
					    hba->devfreq->previous_freq);
		else
			ufshcd_set_clk_freq(hba, !scale_up);
		goto out;
	}

	ufshcd_pm_qos_update(hba, scale_up);

out:
	/*
	 * NOTE(review): the "out:" label and the function tail were truncated
	 * in the extracted source; restored per mainline — verify the
	 * trace/pm_qos calls against the upstream version this file tracks.
	 */
	trace_ufshcd_profile_clk_scaling(hba, (scale_up ? "up" : "down"),
					 ktime_to_us(ktime_sub(ktime_get(), start)),
					 ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       unsigned long freq, bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	if (hba->use_pm_opp)
		return freq != hba->clk_scaling.target_freq;

	/* Scaling is required if any clock is not already at the target. */
	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
/* * Determine the number of pending commands by counting the bits in the SCSI * device budget maps. This approach has been selected because a bit is set in * the budget map before scsi_host_queue_ready() checks the host_self_blocked * flag. The host_self_blocked flag can be modified by calling * scsi_block_requests() or scsi_unblock_requests().
*/ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{ conststruct scsi_device *sdev; unsignedlong flags;
u32 pending = 0;
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_wait_for_pending_cmds(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_pending;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		/*
		 * NOTE(review): the per-iteration doorbell sampling was
		 * missing from the extracted source (tm_doorbell/tr_pending
		 * were never assigned — uninitialized use); restored per
		 * mainline.
		 */
		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_pending = ufshcd_pending_cmds(hba);
		if (!tm_doorbell && !tr_pending) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		io_schedule_timeout(msecs_to_jiffies(20));
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
	} while (tm_doorbell || tr_pending);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_pending);
		ret = -EBUSY;
	}
out:
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @target_gear: target gear to scale to
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
 * non-zero for any other errors.
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear,
			     bool scale_up)
{
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	/*
	 * NOTE(review): the target_gear fast path, the scale-down gear
	 * assignment and the if/goto structure were truncated in the
	 * extracted source (a stray "config_pwr_mode:" label remained);
	 * restored per mainline.
	 */
	if (target_gear) {
		new_pwr_info = hba->pwr_info;
		new_pwr_info.gear_tx = target_gear;
		new_pwr_info.gear_rx = target_gear;

		goto config_pwr_mode;
	}

	/* Legacy gear scaling, in case vops_freq_to_gear_speed() is not implemented */
	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info,
			       &hba->pwr_info,
			       sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
		}
	}

config_pwr_mode:
	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}
/*
 * ufshcd_clock_scaling_prepare - quiesce I/O and take the locks needed for
 * clock scaling, then wait for pending commands to drain.
 * @hba: per adapter instance
 * @timeout_us: maximum time to wait for pending commands, in microseconds
 *
 * On failure every lock taken here is released before returning.
 *
 * Return: 0 upon success; -EBUSY upon timeout or if scaling is not allowed.
 */
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	mutex_lock(&hba->host->scan_mutex);
	blk_mq_quiesce_tagset(&hba->host->tag_set);
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_pending_cmds(hba, timeout_us)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		mutex_unlock(&hba->wb_mutex);
		blk_mq_unquiesce_tagset(&hba->host->tag_set);
		mutex_unlock(&hba->host->scan_mutex);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba);

out:
	return ret;
}
/*
 * ufshcd_clock_scaling_unprepare - undo ufshcd_clock_scaling_prepare()
 * @hba: per adapter instance
 * @err: result of the scaling operation (0 on success)
 */
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
	up_write(&hba->clk_scaling_lock);

	/* Enable Write Booster if current gear requires it else disable it */
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);

	/*
	 * NOTE(review): the unlock tail was truncated in the extracted source
	 * (locks taken in ufshcd_clock_scaling_prepare() were never released);
	 * restored per mainline, mirroring the prepare error path.
	 */
	mutex_unlock(&hba->wb_mutex);

	blk_mq_unquiesce_tagset(&hba->host->tag_set);
	mutex_unlock(&hba->host->scan_mutex);
	ufshcd_release(hba);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True for scaling up and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
 * for any other errors.
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
				bool scale_up)
{
	u32 old_gear = hba->pwr_info.gear_rx;
	u32 new_gear = 0;
	int ret = 0;

	/*
	 * NOTE(review): the gear lookup and the out_unprepare tail were
	 * truncated in the extracted source; restored per mainline. A vop
	 * returning 0 keeps the legacy path inside ufshcd_scale_gear().
	 */
	new_gear = ufshcd_vops_freq_to_gear_speed(hba, freq);

	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, new_gear, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, freq, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, old_gear, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, new_gear, true);
		if (ret) {
			ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
					  false);
			goto out_unprepare;
		}
	}

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba, ret);
	return ret;
}
/*
 * NOTE(review): interior fragment of the devfreq target callback (presumably
 * ufshcd_devfreq_target) — its signature and local declarations were lost in
 * extraction. Kept verbatim: validates clk-scaling support, rounds the
 * requested frequency (OPP or closest clock rate), decides scale direction
 * under the clk_scaling lock, then performs the scale.
 */
if (!ufshcd_is_clkscaling_supported(hba)) return -EINVAL;
if (hba->use_pm_opp) { struct dev_pm_opp *opp;
/* Get the recommended frequency from OPP framework */
opp = devfreq_recommended_opp(dev, freq, flags); if (IS_ERR(opp)) return PTR_ERR(opp);
dev_pm_opp_put(opp);
} else { /* Override with the closest supported frequency */
clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
list);
*freq = (unsignedlong) clk_round_rate(clki->clk, *freq);
}
scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
{ if (ufshcd_eh_in_progress(hba)) return 0;
/* Skip scaling clock when clock scaling is suspended */ if (hba->clk_scaling.is_suspended) {
dev_warn(hba->dev, "clock scaling is suspended, skip"); return 0;
}
if (!hba->clk_scaling.active_reqs)
sched_clk_scaling_suspend_work = true;
if (list_empty(clk_list)) goto out;
/* Decide based on the target or rounded-off frequency and update */ if (hba->use_pm_opp)
scale_up = *freq > hba->clk_scaling.target_freq; else
scale_up = *freq == clki->max_freq;
if (!hba->use_pm_opp && !scale_up)
*freq = clki->min_freq;
/* Update the frequency */ if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
ret = 0; goto out; /* no state change required */
}
}
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, *freq, scale_up); if (!ret)
hba->clk_scaling.target_freq = *freq;
/*
 * NOTE(review): two unrelated fragments fused by extraction. Lines up to the
 * current_frequency assignment belong to a devfreq status callback
 * (presumably ufshcd_devfreq_get_dev_status); the "Exit from hibern8" section
 * belongs to the clock-ungate work (presumably ufshcd_ungate_work). Kept
 * verbatim; restore both functions from upstream.
 */
if (!ufshcd_is_clkscaling_supported(hba)) return -EINVAL;
memset(stat, 0, sizeof(*stat));
guard(spinlock_irqsave)(&hba->clk_scaling.lock);
curr_t = ktime_get(); if (!scaling->window_start_t) goto start_window;
/* * If current frequency is 0, then the ondemand governor considers * there's no initial frequency set. And it always requests to set * to max. frequency.
*/ if (hba->use_pm_opp) {
stat->current_frequency = hba->clk_scaling.target_freq;
} else { struct list_head *clk_list = &hba->clk_list_head; struct ufs_clk_info *clki;
/* Exit from hibern8 */ if (ufshcd_can_hibern8_during_gating(hba)) { /* Prevent gating in this path */
hba->clk_gating.is_suspended = true; if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba); if (ret)
dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
__func__, ret); else
ufshcd_set_link_active(hba);
}
hba->clk_gating.is_suspended = false;
}
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 *	Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 */
void ufshcd_hold(struct ufs_hba *hba)
{
	bool flush_result;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba) ||
	    !hba->clk_gating.is_initialized)
		return;
	spin_lock_irqsave(&hba->clk_gating.lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibner8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				return;
			spin_lock_irqsave(&hba->clk_gating.lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(hba,
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		fallthrough;
	case CLKS_OFF:
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(hba,
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		fallthrough;
	case REQ_CLKS_ON:
		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(&hba->clk_gating.lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
/*
 * NOTE(review): interior fragment of the clock-gating work function
 * (presumably ufshcd_gate_work) — its signature and local declarations were
 * lost in extraction. Kept verbatim: bails out if gating was cancelled,
 * optionally enters hibern8, disables IRQs and clocks, puts the host in low
 * power, and finally commits the CLKS_OFF state under the gating lock.
 */
scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) { /* * In case you are here to cancel this work the gating state * would be marked as REQ_CLKS_ON. In this case save time by * skipping the gating work and exit after changing the clock * state to CLKS_ON.
*/ if (hba->clk_gating.is_suspended ||
hba->clk_gating.state != REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(hba,
hba->clk_gating.state); return;
}
/* put the link into hibern8 mode before turning off clocks */ if (ufshcd_can_hibern8_during_gating(hba)) {
ret = ufshcd_uic_hibern8_enter(hba); if (ret) {
hba->clk_gating.state = CLKS_ON;
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
trace_ufshcd_clk_gating(hba,
hba->clk_gating.state); return;
}
ufshcd_set_link_hibern8(hba);
}
ufshcd_disable_irq(hba);
ufshcd_setup_clocks(hba, false);
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba); /* * In case you are here to cancel this work the gating state * would be marked as REQ_CLKS_ON. In this case keep the state * as REQ_CLKS_ON which would anyway imply that clocks are off * and a request to turn them on is pending. By doing this way, * we keep the state machine in tact and this would ultimately * prevent from doing cancel work multiple times when there are * new requests arriving before the current cancel work is done.
*/
guard(spinlock_irqsave)(&hba->clk_gating.lock); if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(hba,
hba->clk_gating.state);
}
}
/*
 * NOTE(review): the following German website disclaimer is extraction
 * residue, not driver source. English translation: "The information on this
 * website was carefully compiled to the best of our knowledge. However,
 * neither completeness, nor correctness, nor quality of the provided
 * information is guaranteed. Note: the colored syntax display and the
 * measurement are still experimental." Preserved below pending removal:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */