/*
 * Module parameters controlling how the available MCQ hardware queues are
 * split between read/write, read-only, and poll queue types. The *_count_ops
 * callbacks (declared elsewhere in this file) validate the values on write.
 */
static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues, "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");

static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues, "Number of interrupt driven read queues used for read. Default value is 0");

static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of poll queues used for r/w. Default value is 1");
/**
 * ufshcd_mcq_config_mac - Set the #Max Active Cmds.
 * @hba: per adapter instance
 * @max_active_cmds: maximum # of active commands to the device at any time.
 *
 * The controller won't send more than the max_active_cmds to the device at
 * any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
	u32 val;

	val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
	val &= ~MCQ_CFG_MAC_MASK;
	/* The MAC register field is 0-based, hence the -1 */
	val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
	ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
/**
 * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
 * request would be issued.
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
 * Return: the hardware queue instance on which the request will be or has
 * been queued. %NULL if the request has already been freed.
 */
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct request *req)
/*
 * NOTE(review): the body below is truncated in this chunk — only the first
 * statement (fetching the request's hctx) is visible; the remainder of the
 * function is missing here and must be restored from the full file.
 */
{ struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);
/**
 * ufshcd_mcq_queue_cfg_addr - get the start address of the MCQ Queue Config
 * Registers.
 * @hba: per adapter instance
 *
 * Return: Start address of MCQ Queue Config Registers in HCI
 */
unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
{
	/* QCFGPTR (from the MCQ capabilities register) is in 0x200-byte units */
	return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_queue_cfg_addr);
/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue-depth on success, non-zero on error
 *
 * MAC - Max. Active Command of the Host Controller (HC)
 * HC wouldn't send more than this commands to the device.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and ufs device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
	int mac;

	if (!hba->vops || !hba->vops->get_hba_mac) {
		/*
		 * Extract the maximum number of active transfer tasks value
		 * from the host controller capabilities register. This value is
		 * 0-based.
		 */
		hba->capabilities =
			ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
		mac = hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_MCQ;
		mac++;
	} else {
		/* Let the host driver override the MAC value */
		mac = hba->vops->get_hba_mac(hba);
	}
	if (mac < 0)
		goto err;

	WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
	/*
	 * max. value of bqueuedepth = 256, mac is host dependent.
	 * It is mandatory for UFS device to define bQueueDepth if
	 * shared queuing architecture is enabled.
	 */
	return min_t(int, mac, hba->dev_info.bqueuedepth);

err:
	dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
	return mac;
}
/*
 * NOTE(review): this span is the interior of a queue-count configuration
 * function — its signature and the declarations of hba_maxq, tot_queues,
 * rem, i, and host are outside this chunk. Confirm against the full file.
 */
/* The controller cannot provide more queues than its capability allows. */
if (hba_maxq < tot_queues) {
dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
tot_queues, hba_maxq); return -EOPNOTSUPP;
}
/*
 * Device should support at least one I/O queue to handle device
 * commands via hba->dev_cmd_queue.
 */
if (hba_maxq == poll_queues) {
dev_err(hba->dev, "At least one non-poll queue required\n"); return -EOPNOTSUPP;
}
/* Distribute the available hardware queues among the queue types. */
rem = hba_maxq;
if (rw_queues) {
hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
} else {
/* No explicit rw count requested: default to one queue per possible CPU. */
rw_queues = num_possible_cpus();
}
if (poll_queues) {
hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
rem -= hba->nr_queues[HCTX_TYPE_POLL];
}
if (read_queues) {
hba->nr_queues[HCTX_TYPE_READ] = read_queues;
rem -= hba->nr_queues[HCTX_TYPE_READ];
}
/* If no rw count was set above, give the default type whatever remains. */
if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
num_possible_cpus());
/* Total hardware queues exposed to the block layer. */
for (i = 0; i < HCTX_MAX_TYPES; i++)
host->nr_hw_queues += hba->nr_queues[i];
/*
 * Current MCQ specification doesn't provide a Task Tag or its equivalent in
 * the Completion Queue Entry. Find the Task Tag using an indirect method:
 * the offset of the command's UCD within the UCD list, divided by the UCD
 * size, is the task tag.
 */
static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{
	u64 addr;

	/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
	BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));

	/* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
	addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
		hba->ucdl_dma_addr;

	return div_u64(addr, ufshcd_get_ucd_size(hba));
}
/* Complete the command described by the current CQ entry, if still valid. */
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{
	struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
	int tag = ufshcd_mcq_get_tag(hba, cqe);

	/* A zero UCD base address marks an empty (invalid) entry */
	if (cqe->command_desc_base_addr) {
		ufshcd_compl_one_cqe(hba, tag, cqe);
		/* After processed the cqe, mark it empty (invalid) entry */
		cqe->command_desc_base_addr = 0;
	}
}
/*
 * NOTE(review): this span is the interior of a per-queue initialization
 * function — hba, hwq, and the loop index i are declared outside this
 * chunk. Confirm against the full file.
 */
/* Save the base addresses for quicker access */
hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;
/* Reinitializing is needed upon HC reset */
hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;
/* Enable Tail Entry Push Status interrupt only for non-poll queues */
if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);
/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
	utrd->header.command_type = 0xf;
}
/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command is present in the SQ
 * (not fetched by the controller); returns false if the SQE is not in the SQ.
 */
/*
 * NOTE(review): in this chunk "staticbool" and "returntrue" are missing a
 * space ("static bool" / "return true"), and the function body is truncated
 * after the quirk check — restore both from the full file.
 */
staticbool ufshcd_mcq_sqe_search(struct ufs_hba *hba, struct ufs_hw_queue *hwq, int task_tag)
{ struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; struct utp_transfer_req_desc *utrd;
__le64 cmd_desc_base_addr; bool ret = false;
u64 addr, match;
u32 sq_head_slot;
/* Hosts with this quirk cannot nullify SQ entries; report "found in SQ". */
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) returntrue;
/**
 * ufshcd_mcq_abort - Abort the command in MCQ.
 * @cmd: The command to be aborted.
 *
 * Return: SUCCESS or FAILED error codes
 */
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{ struct Scsi_Host *host = cmd->device->host; struct ufs_hba *hba = shost_priv(host); int tag = scsi_cmd_to_rq(cmd)->tag; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct ufs_hw_queue *hwq; int err;
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
__func__, tag); return FAILED;
}
/* A %NULL hwq means the request has already completed and been freed. */
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); if (!hwq) {
dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
__func__, tag); return FAILED;
}
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
 * Failure. The command should not be "stuck" in SQ for
 * a long time which resulted in command being aborted.
 */
dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
__func__, hwq->id, tag); return FAILED;
}
/*
 * The command is not in the submission queue, and it is not
 * in the completion queue either. Query the device to see if
 * the command is being processed in the device.
 */
err = ufshcd_try_to_abort_task(hba, tag); if (err) {
dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
/* Remember the failure so later aborts of this tag bail out early. */
lrbp->req_abort_skip = true; return FAILED;
}
/*
 * NOTE(review): the function is truncated here in this chunk — the success
 * path (presumably "return SUCCESS;") and the closing brace are not
 * visible and must be restored from the full file.
 */
/*
 * NOTE(review): the following lines are German website boilerplate that
 * leaked into this file during extraction; they are not C code and have
 * been wrapped in a comment to keep the file well-formed. Translation:
 * "The information on this website was compiled carefully to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Note: the colored
 * syntax display and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */