/*
 * Fragment of the Delete I/O CQ handler (function header is outside this
 * chunk).  Explicit queue deletion is only meaningful for the PCI
 * transport; fabrics controllers reject the opcode.
 */
if (!nvmet_is_pci_ctrl(ctrl)) {
status = nvmet_report_invalid_opcode(req); goto complete;
}
/* Validate the CQ ID before indexing the controller's CQ table. */
status = nvmet_check_io_cqid(ctrl, cqid, false); if (status != NVME_SC_SUCCESS) goto complete;
/* A CQ may only be deleted once it exists and no SQ is mapped onto it. */
if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) { /* Some SQs are still using this CQ */
status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; goto complete;
}
/*
 * NOTE(review): the lines below interleave fragments of the SMART log
 * handler and the endurance-group log handler; both function headers are
 * outside this chunk.  Code is left untouched.
 */
log = kzalloc(sizeof(*log), GFP_KERNEL); if (!log) goto out;
/* NSID_ALL requests SMART data aggregated over all namespaces. */
if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
status = nvmet_get_smart_log_all(req, log); else
status = nvmet_get_smart_log_nsid(req, log); if (status) goto out_free_log;
/*
 * The target driver emulates each endurance group as its own
 * namespace, reusing the nsid as the endurance group identifier.
 */
req->cmd->common.nsid = cpu_to_le32(le16_to_cpu(
req->cmd->get_log_page.lsi));
status = nvmet_req_find_ns(req); if (status) goto out;
/* Second allocation belongs to the endurance-group handler fragment. */
log = kzalloc(sizeof(*log), GFP_KERNEL); if (!log) {
status = NVME_SC_INTERNAL; goto out;
}
/* copy the header last once we know the number of groups */
status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
nvmet_req_complete(req, status);
}
staticvoid nvmet_execute_get_log_page(struct nvmet_req *req)
{ if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd))) return;
switch (req->cmd->get_log_page.lid) { case NVME_LOG_SUPPORTED: return nvmet_execute_get_supported_log_pages(req); case NVME_LOG_ERROR: return nvmet_execute_get_log_page_error(req); case NVME_LOG_SMART: return nvmet_execute_get_log_page_smart(req); case NVME_LOG_FW_SLOT: /* * We only support a single firmware slot which always is * active, so we can zero out the whole firmware slot log and * still claim to fully implement this mandatory log page.
*/ return nvmet_execute_get_log_page_noop(req); case NVME_LOG_CHANGED_NS: return nvmet_execute_get_log_changed_ns(req); case NVME_LOG_CMD_EFFECTS: return nvmet_execute_get_log_cmd_effects_ns(req); case NVME_LOG_ENDURANCE_GROUP: return nvmet_execute_get_log_page_endgrp(req); case NVME_LOG_ANA: return nvmet_execute_get_log_page_ana(req); case NVME_LOG_FEATURES: return nvmet_execute_get_log_page_features(req); case NVME_LOG_RMI: return nvmet_execute_get_log_page_rmi(req); case NVME_LOG_RESERVATION: return nvmet_execute_get_log_page_resv(req);
}
pr_debug("unhandled lid %d on qid %d\n",
req->cmd->get_log_page.lid, req->sq->qid);
req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR);
}
/*
 * Fragment of the Identify Controller handler (header outside this chunk):
 * fills in the controller data structure and copies it to the host SGL.
 */
/* XXX: figure out what to do about RTD3R/RTD3 */
id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
/* RHII (Reservation Host ID Ignored) is only advertised on PCI. */
ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS; if (nvmet_is_pci_ctrl(ctrl))
ctratt |= NVME_CTRL_ATTR_RHII;
id->ctratt = cpu_to_le32(ctratt);
id->oacs = 0;
/*
 * We don't really have a practical limit on the number of abort
 * commands.  But we don't do anything useful for abort either, so
 * no point in allowing more abort commands than the spec requires.
 */
id->acl = 3;
id->aerl = NVMET_ASYNC_EVENTS - 1;
/* first slot is read-only, only one slot supported */
id->frmw = (1 << 0) | (1 << 1);
id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
id->npss = 0;
/* We support keep-alive timeout in granularity of seconds */
id->kas = cpu_to_le16(NVMET_KAS);
/*
 * Max command capsule size is sqe + in-capsule data size.
 * Disable in-capsule data for Metadata capable controllers.
 */
cmd_capsule_size = sizeof(struct nvme_command); if (!ctrl->pi_support)
cmd_capsule_size += req->port->inline_data_size;
id->ioccsz = cpu_to_le32(cmd_capsule_size / 16);
/* Max response capsule size is cqe */
id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);
id->msdbd = ctrl->ops->msdbd;
/*
 * Endurance group identifier is 16 bits, so we can't let namespaces
 * overflow that since we reuse the nsid
 */
BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX);
id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES);
/*
 * Meh, we don't really support any power state.  Fake up the same
 * values that qemu does.
 */
id->psd[0].max_power = cpu_to_le16(0x9c4);
id->psd[0].entry_lat = cpu_to_le32(0x10);
id->psd[0].exit_lat = cpu_to_le32(0x4);
id->nwpc = 1 << 0; /* write protect and no write protect */
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
/*
 * Fragment of the Identify Namespace handler (header outside this chunk).
 * NSID_ALL is not a valid target for per-namespace identify.
 */
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out;
}
id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) {
status = NVME_SC_INTERNAL; goto out;
}
/* return an all zeroed buffer if we can't find an active namespace */
status = nvmet_req_find_ns(req); if (status) {
status = 0; goto done;
}
/* Size change detected: notify the host via a changed-namespace AEN. */
if (nvmet_ns_revalidate(req->ns)) {
mutex_lock(&req->ns->subsys->lock);
nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
mutex_unlock(&req->ns->subsys->lock);
}
/*
 * nuse = ncap = nsze isn't always true, but we have no way to find
 * that out from the underlying device.
 */
id->ncap = id->nsze =
cpu_to_le64(req->ns->size >> req->ns->blksize_shift); switch (req->port->ana_state[req->ns->anagrpid]) { case NVME_ANA_INACCESSIBLE: case NVME_ANA_PERSISTENT_LOSS: break; default:
id->nuse = id->nsze; break;
}
if (req->ns->bdev)
nvmet_bdev_set_limits(req->ns->bdev, id);
/*
 * We just provide a single LBA format that matches what the
 * underlying device reports.
 */
id->nlbaf = 0;
id->flbas = 0;
/*
 * Our namespace might always be shared.  Not just with other
 * controllers, but also with any other user of the block device.
 */
id->nmic = NVME_NS_NMIC_SHARED;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
/*
 * Fragment of the Identify Active Namespace List handler below.
 * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid.
 * See NVMe Base Specification, Active Namespace ID list (CNS 02h).
 */
if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out;
}
list = kzalloc(buf_size, GFP_KERNEL); if (!list) {
status = NVME_SC_INTERNAL; goto out;
}
/*
 * NOTE(review): the CSI filter compares req->ns->csi rather than the
 * iterated ns->csi — looks suspicious, confirm against upstream/spec.
 */
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { if (ns->nsid <= min_nsid) continue; if (match_css && req->ns->csi != req->cmd->identify.csi) continue;
list[i++] = cpu_to_le32(ns->nsid); if (i == buf_size / sizeof(__le32)) break;
}
status = nvmet_copy_to_sgl(req, 0, list, buf_size);
/*
 * Fragment of the CS-independent Identify Namespace handler (CNS 08h);
 * function header is outside this chunk.
 */
status = nvmet_req_find_ns(req); if (status) goto out;
id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) {
status = NVME_SC_INTERNAL; goto out;
}
id->nstat = NVME_NSTAT_NRDY;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
/* Read-only namespaces and rotational backing devices are reported. */
id->nmic = NVME_NS_NMIC_SHARED; if (req->ns->readonly)
id->nsattr |= NVME_NS_ATTR_RO; if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))
id->nsfeat |= NVME_NS_ROTATIONAL;
/*
 * We need flush command to flush the file's metadata,
 * so report supporting vwc if backend is file, even
 * though buffered_io is disabled.
 */
if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))
id->nsfeat |= NVME_NS_VWC_NOT_PRESENT;
staticvoid nvmet_execute_identify(struct nvmet_req *req)
{ if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) return;
switch (req->cmd->identify.cns) { case NVME_ID_CNS_NS:
nvmet_execute_identify_ns(req); return; case NVME_ID_CNS_CTRL:
nvmet_execute_identify_ctrl(req); return; case NVME_ID_CNS_NS_ACTIVE_LIST:
nvmet_execute_identify_nslist(req, false); return; case NVME_ID_CNS_NS_DESC_LIST:
nvmet_execute_identify_desclist(req); return; case NVME_ID_CNS_CS_NS: switch (req->cmd->identify.csi) { case NVME_CSI_NVM:
nvme_execute_identify_ns_nvm(req); return; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
nvmet_execute_identify_ns_zns(req); return;
} break;
} break; case NVME_ID_CNS_CS_CTRL: switch (req->cmd->identify.csi) { case NVME_CSI_NVM:
nvmet_execute_identify_ctrl_nvm(req); return; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
nvmet_execute_identify_ctrl_zns(req); return;
} break;
} break; case NVME_ID_CNS_NS_ACTIVE_LIST_CS:
nvmet_execute_identify_nslist(req, true); return; case NVME_ID_CNS_NS_CS_INDEP:
nvmet_execute_id_cs_indep(req); return; case NVME_ID_CNS_ENDGRP_LIST:
nvmet_execute_identify_endgrp_list(req); return;
}
/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
 * wasn't found.
 *
 * Fix: the original read "staticvoid" (missing space), a compile error
 * introduced by line mangling.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	/* CDW0 bit 0 set = command to abort was not found. */
	nvmet_set_result(req, 1);
	nvmet_req_complete(req, 0);
}
/*
 * NOTE(review): the lines below are interior fragments of several
 * set/get-feature helpers (host ID, IRQ coalescing, IRQ config,
 * write protect); their function headers are outside this chunk.
 */
if (!nvmet_is_pci_ctrl(ctrl)) return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR;
/*
 * The NVMe base specifications v2.1 recommends supporting 128-bits host
 * IDs (section 5.1.25.1.28.1).  However, that same section also says
 * that "The controller may support a 64-bit Host Identifier and/or an
 * extended 128-bit Host Identifier".  So simplify this support and do
 * not support 64-bits host IDs to avoid needing to check that all
 * controllers associated with the same subsystem all use the same host
 * ID size.
 */
if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
req->error_loc = offsetof(struct nvme_common_command, cdw11); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
/*
 * This feature is not supported for fabrics controllers and mandatory
 * for PCI controllers.
 */
if (!nvmet_is_pci_ctrl(ctrl)) {
req->error_loc = offsetof(struct nvme_common_command, cdw10); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
/*
 * This feature is not supported for fabrics controllers and mandatory
 * for PCI controllers.
 */
if (!nvmet_is_pci_ctrl(ctrl)) {
req->error_loc = offsetof(struct nvme_common_command, cdw10); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
/* Get Feature (Write Protect) fragment: result reflects ns readonly state. */
result = nvmet_req_find_ns(req); if (result) return result;
mutex_lock(&subsys->lock); if (req->ns->readonly == true)
result = NVME_NS_WRITE_PROTECT; else
result = NVME_NS_NO_WRITE_PROTECT;
nvmet_set_result(req, result);
mutex_unlock(&subsys->lock);
/*
 * This feature is not supported for fabrics controllers and mandatory
 * for PCI controllers.
 */
if (!nvmet_is_pci_ctrl(ctrl)) {
req->error_loc = offsetof(struct nvme_common_command, cdw10); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
/* Delegate to the transport's feature accessor (PCI only). */
status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc); if (status != NVME_SC_SUCCESS) return status;
/*
 * This feature is not supported for fabrics controllers and mandatory
 * for PCI controllers.
 */
if (!nvmet_is_pci_ctrl(ctrl)) {
req->error_loc = offsetof(struct nvme_common_command, cdw10); return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
}
status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg); if (status != NVME_SC_SUCCESS) return status;
/*
 * Interior of the Get Features handler (header outside this chunk):
 * dispatch on the feature ID in the low byte of CDW10.
 */
if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10))) return;
switch (cdw10 & 0xff) {
/*
 * These features are mandatory in the spec, but we don't
 * have a useful way to implement them.  We'll eventually
 * need to come up with some fake values for these.
 */
#if 0 case NVME_FEAT_POWER_MGMT: break; case NVME_FEAT_TEMP_THRESH: break; case NVME_FEAT_ERR_RECOVERY: break; case NVME_FEAT_WRITE_ATOMIC: break; #endif case NVME_FEAT_ARBITRATION:
status = nvmet_get_feat_arbitration(req); break; case NVME_FEAT_IRQ_COALESCE:
status = nvmet_get_feat_irq_coalesce(req); break; case NVME_FEAT_IRQ_CONFIG:
status = nvmet_get_feat_irq_config(req); break; case NVME_FEAT_ASYNC_EVENT:
nvmet_get_feat_async_event(req); break; case NVME_FEAT_VOLATILE_WC:
nvmet_set_result(req, 1); break; case NVME_FEAT_NUM_QUEUES:
/* Both NSQR and NCQR are reported as max_qid - 1 (0's based). */
nvmet_set_result(req,
(subsys->max_qid-1) | ((subsys->max_qid-1) << 16)); break; case NVME_FEAT_KATO:
nvmet_get_feat_kato(req); break; case NVME_FEAT_HOST_ID: /* need 128-bit host identifier flag */ if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
req->error_loc =
offsetof(struct nvme_common_command, cdw11);
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break;
}
status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid, sizeof(req->sq->ctrl->hostid)); break; case NVME_FEAT_WRITE_PROTECT:
status = nvmet_get_feat_write_protect(req); break; case NVME_FEAT_RESV_MASK:
status = nvmet_get_feat_resv_notif_mask(req); break; default:
req->error_loc =
offsetof(struct nvme_common_command, cdw10);
status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break;
}
/*
 * Tail of the admin-command data-length helper (header outside this
 * chunk): returns the expected transfer length for each admin opcode.
 */
if (nvme_is_fabrics(cmd)) return nvmet_fabrics_admin_cmd_data_len(req); if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) return nvmet_discovery_cmd_data_len(req);
switch (cmd->common.opcode) { case nvme_admin_get_log_page: return nvmet_get_log_page_len(cmd); case nvme_admin_identify: return NVME_IDENTIFY_DATA_SIZE; case nvme_admin_get_features: return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10)); default: return 0;
}
}
/*
 * Interior/tail of nvmet_parse_admin_cmd (header outside this chunk):
 * routes fabrics/discovery/passthru commands first, then assigns the
 * execute handler for each supported admin opcode.
 */
if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_admin_cmd(req); if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req); if (unlikely(ret)) return ret;
/* For PCI controllers, admin commands shall not use SGL. */
if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid &&
cmd->common.flags & NVME_CMD_SGL_ALL) return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
if (nvmet_is_passthru_req(req)) return nvmet_parse_passthru_admin_cmd(req);
switch (cmd->common.opcode) { case nvme_admin_delete_sq:
req->execute = nvmet_execute_delete_sq; return 0; case nvme_admin_create_sq:
req->execute = nvmet_execute_create_sq; return 0; case nvme_admin_get_log_page:
req->execute = nvmet_execute_get_log_page; return 0; case nvme_admin_delete_cq:
req->execute = nvmet_execute_delete_cq; return 0; case nvme_admin_create_cq:
req->execute = nvmet_execute_create_cq; return 0; case nvme_admin_identify:
req->execute = nvmet_execute_identify; return 0; case nvme_admin_abort_cmd:
req->execute = nvmet_execute_abort; return 0; case nvme_admin_set_features:
req->execute = nvmet_execute_set_features; return 0; case nvme_admin_get_features:
req->execute = nvmet_execute_get_features; return 0; case nvme_admin_async_event:
req->execute = nvmet_execute_async_event; return 0; case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive; return 0; default: return nvmet_report_invalid_opcode(req);
}
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.5 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.